From f4199be2161e3fce2a0edf747fc7db3c81febc92 Mon Sep 17 00:00:00 2001 From: James Barford-Evans Date: Fri, 14 Mar 2025 10:49:35 +0000 Subject: [PATCH 1/2] refactor - arm_shared intrinsics are now YAML, where possible use anchor tags --- .../src/arm_shared/neon/generated.rs | 31712 ++++++++++------ crates/core_arch/src/arm_shared/neon/mod.rs | 7093 ---- .../spec/neon/aarch64.spec.yml | 34 - .../spec/neon/arm_shared.spec.yml | 3713 +- 4 files changed, 21816 insertions(+), 20736 deletions(-) diff --git a/crates/core_arch/src/arm_shared/neon/generated.rs b/crates/core_arch/src/arm_shared/neon/generated.rs index dddbf5e101..d797c3c589 100644 --- a/crates/core_arch/src/arm_shared/neon/generated.rs +++ b/crates/core_arch/src/arm_shared/neon/generated.rs @@ -404,6 +404,132 @@ fn priv_vpadalq_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { } unsafe { _priv_vpadalq_u32(a, b) } } +#[doc = "Absolute difference and accumulate (64-bit)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaba_s16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(saba) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vaba_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { + unsafe { simd_add(a, vabd_s16(b, c)) } +} +#[doc = "Absolute difference and accumulate (64-bit)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaba_s32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vaba.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(saba) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vaba_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { + unsafe { simd_add(a, vabd_s32(b, c)) } +} +#[doc = "Absolute difference and accumulate (64-bit)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaba_s8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(saba) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vaba_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { + unsafe { simd_add(a, vabd_s8(b, c)) } +} +#[doc = "Absolute difference and accumulate (64-bit)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaba_u16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uaba) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vaba_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { + unsafe { 
simd_add(a, vabd_u16(b, c)) } +} +#[doc = "Absolute difference and accumulate (64-bit)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaba_u32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uaba) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vaba_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { + unsafe { simd_add(a, vabd_u32(b, c)) } +} +#[doc = "Absolute difference and accumulate (64-bit)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaba_u8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uaba) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vaba_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { + unsafe { simd_add(a, vabd_u8(b, c)) } +} #[doc = "Signed Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_s8)"] #[inline] @@ -545,6 +671,132 @@ pub fn vabal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { let d: uint32x2_t = vabd_u32(b, c); unsafe { simd_add(a, simd_cast(d)) } } +#[doc = "Absolute difference and 
accumulate (128-bit)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabaq_s16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(saba) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vabaq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { + unsafe { simd_add(a, vabdq_s16(b, c)) } +} +#[doc = "Absolute difference and accumulate (128-bit)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabaq_s32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(saba) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vabaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { + unsafe { simd_add(a, vabdq_s32(b, c)) } +} +#[doc = "Absolute difference and accumulate (128-bit)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabaq_s8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(saba) 
+)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vabaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { + unsafe { simd_add(a, vabdq_s8(b, c)) } +} +#[doc = "Absolute difference and accumulate (128-bit)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabaq_u16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uaba) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vabaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { + unsafe { simd_add(a, vabdq_u16(b, c)) } +} +#[doc = "Absolute difference and accumulate (128-bit)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabaq_u32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uaba) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vabaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { + unsafe { simd_add(a, vabdq_u32(b, c)) } +} +#[doc = "Absolute difference and accumulate (128-bit)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabaq_u8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uaba) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vabaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { + unsafe { simd_add(a, vabdq_u8(b, c)) } +} #[doc = "Absolute difference between the arguments of Floating"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_f16)"] #[inline] @@ -1416,6 +1668,342 @@ pub fn vadd_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { pub fn vaddq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { unsafe { simd_add(a, b) } } +#[doc = "Vector add."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_f32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe { simd_add(a, b) } +} +#[doc = "Vector add."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_s16)"] +#[inline] +#[target_feature(enable = "neon")] 
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(add) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { simd_add(a, b) } +} +#[doc = "Vector add."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_s32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(add) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { simd_add(a, b) } +} +#[doc = "Vector add."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_s8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(add) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { simd_add(a, b) } +} +#[doc = "Vector add."] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_u16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(add) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { simd_add(a, b) } +} +#[doc = "Vector add."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_u32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(add) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { simd_add(a, b) } +} +#[doc = "Vector add."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_u8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(add) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { simd_add(a, b) } +} +#[doc = "Vector add."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_f32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe { simd_add(a, b) } +} +#[doc = "Vector add."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_s16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(add) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { simd_add(a, b) } +} +#[doc = "Vector add."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_s32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + 
assert_instr(add) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { simd_add(a, b) } +} +#[doc = "Vector add."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_s64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(add) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe { simd_add(a, b) } +} +#[doc = "Vector add."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_s8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(add) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { simd_add(a, b) } +} +#[doc = "Vector add."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_u16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(add) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { simd_add(a, b) } +} +#[doc = "Vector add."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_u32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(add) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { simd_add(a, b) } +} +#[doc = "Vector add."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_u64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(add) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { simd_add(a, b) } +} +#[doc = "Vector add."] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_u8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(add) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { simd_add(a, b) } +} #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_p8)"] #[inline] @@ -1556,15 +2144,15 @@ pub fn vaddq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { pub fn vaddh_f16(a: f16, b: f16) -> f16 { a + b } -#[doc = "Bitwise exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p128)"] +#[doc = "Add returning High Narrow (high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddhn_high_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(addhn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -1574,118 +2162,69 @@ pub fn vaddh_f16(a: f16, b: f16) -> f16 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vaddq_p128(a: p128, b: p128) -> p128 { - a ^ b +pub fn vaddhn_high_s16(r: int8x8_t, a: int16x8_t, b: int16x8_t) -> int8x16_t { + unsafe { + 
let x = simd_cast(simd_shr(simd_add(a, b), int16x8_t::splat(8))); + simd_shuffle!(r, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) + } } -#[doc = "AES single round encryption."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesdq_u8)"] +#[doc = "Add returning High Narrow (high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddhn_high_s32)"] #[inline] -#[target_feature(enable = "aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(aesd))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(addhn2) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] -pub fn vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.aesd" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesd")] - fn _vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t; - } - unsafe { _vaesdq_u8(data, key) } -} -#[doc = "AES single round encryption."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaeseq_u8)"] -#[inline] -#[target_feature(enable = "aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(aese))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg_attr( - not(target_arch = 
"arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") -)] -pub fn vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.aese" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aese")] - fn _vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t; +pub fn vaddhn_high_s32(r: int16x4_t, a: int32x4_t, b: int32x4_t) -> int16x8_t { + unsafe { + let x = simd_cast(simd_shr(simd_add(a, b), int32x4_t::splat(16))); + simd_shuffle!(r, x, [0, 1, 2, 3, 4, 5, 6, 7]) } - unsafe { _vaeseq_u8(data, key) } } -#[doc = "AES inverse mix columns."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesimcq_u8)"] +#[doc = "Add returning High Narrow (high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddhn_high_s64)"] #[inline] -#[target_feature(enable = "aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(aesimc))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(addhn2) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] -pub fn vaesimcq_u8(data: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.aesimc" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesimc")] - fn _vaesimcq_u8(data: 
uint8x16_t) -> uint8x16_t; - } - unsafe { _vaesimcq_u8(data) } -} -#[doc = "AES mix columns."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesmcq_u8)"] -#[inline] -#[target_feature(enable = "aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(aesmc))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") -)] -pub fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.aesmc" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesmc")] - fn _vaesmcq_u8(data: uint8x16_t) -> uint8x16_t; +pub fn vaddhn_high_s64(r: int32x2_t, a: int64x2_t, b: int64x2_t) -> int32x4_t { + unsafe { + let x = simd_cast(simd_shr(simd_add(a, b), int64x2_t::splat(32))); + simd_shuffle!(r, x, [0, 1, 2, 3]) } - unsafe { _vaesmcq_u8(data) } } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s8)"] +#[doc = "Add returning High Narrow (high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddhn_high_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(addhn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -1695,18 +2234,21 @@ pub fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", 
issue = "111800") )] -pub fn vand_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe { simd_and(a, b) } +pub fn vaddhn_high_u16(r: uint8x8_t, a: uint16x8_t, b: uint16x8_t) -> uint8x16_t { + unsafe { + let x = simd_cast(simd_shr(simd_add(a, b), uint16x8_t::splat(8))); + simd_shuffle!(r, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) + } } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s8)"] +#[doc = "Add returning High Narrow (high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddhn_high_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(addhn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -1716,18 +2258,21 @@ pub fn vand_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vandq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe { simd_and(a, b) } +pub fn vaddhn_high_u32(r: uint16x4_t, a: uint32x4_t, b: uint32x4_t) -> uint16x8_t { + unsafe { + let x = simd_cast(simd_shr(simd_add(a, b), uint32x4_t::splat(16))); + simd_shuffle!(r, x, [0, 1, 2, 3, 4, 5, 6, 7]) + } } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s16)"] +#[doc = "Add returning High Narrow (high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddhn_high_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(addhn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -1737,18 +2282,21 @@ pub fn vandq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vand_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe { simd_and(a, b) } +pub fn vaddhn_high_u64(r: uint32x2_t, a: uint64x2_t, b: uint64x2_t) -> uint32x4_t { + unsafe { + let x = simd_cast(simd_shr(simd_add(a, b), uint64x2_t::splat(32))); + simd_shuffle!(r, x, [0, 1, 2, 3]) + } } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s16)"] +#[doc = "Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddhn_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(addhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -1758,18 +2306,18 @@ pub fn vand_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vandq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe { simd_and(a, b) } +pub fn vaddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { + unsafe { simd_cast(simd_shr(simd_add(a, b), int16x8_t::splat(8))) } } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s32)"] +#[doc = "Add 
returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddhn_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(addhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -1779,18 +2327,18 @@ pub fn vandq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vand_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe { simd_and(a, b) } +pub fn vaddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { + unsafe { simd_cast(simd_shr(simd_add(a, b), int32x4_t::splat(16))) } } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s32)"] +#[doc = "Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddhn_s64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(addhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -1800,18 +2348,18 @@ pub fn vand_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vandq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe { simd_and(a, b) } +pub fn vaddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { + unsafe { 
simd_cast(simd_shr(simd_add(a, b), int64x2_t::splat(32))) } } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s64)"] +#[doc = "Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddhn_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(addhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -1821,18 +2369,18 @@ pub fn vandq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vand_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - unsafe { simd_and(a, b) } +pub fn vaddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { + unsafe { simd_cast(simd_shr(simd_add(a, b), uint16x8_t::splat(8))) } } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s64)"] +#[doc = "Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddhn_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(addhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -1842,18 +2390,18 @@ pub fn vand_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vandq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - unsafe { simd_and(a, b) } +pub fn vaddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { + unsafe { simd_cast(simd_shr(simd_add(a, b), uint32x4_t::splat(16))) } } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u8)"] +#[doc = "Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddhn_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(addhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -1863,18 +2411,18 @@ pub fn vandq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vand_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe { simd_and(a, b) } +pub fn vaddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { + unsafe { simd_cast(simd_shr(simd_add(a, b), uint64x2_t::splat(32))) } } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u8)"] +#[doc = "Signed Add Long (vector, high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddl_high_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = 
"arm64ec")), - assert_instr(and) + assert_instr(saddl2) )] #[cfg_attr( not(target_arch = "arm"), @@ -1884,18 +2432,24 @@ pub fn vand_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vandq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe { simd_and(a, b) } +pub fn vaddl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { + unsafe { + let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + let a: int32x4_t = simd_cast(a); + let b: int32x4_t = simd_cast(b); + simd_add(a, b) + } } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u16)"] +#[doc = "Signed Add Long (vector, high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddl_high_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(saddl2) )] #[cfg_attr( not(target_arch = "arm"), @@ -1905,18 +2459,24 @@ pub fn vandq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vand_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe { simd_and(a, b) } +pub fn vaddl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { + unsafe { + let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); + let b: int32x2_t = simd_shuffle!(b, b, [2, 3]); + let a: int64x2_t = simd_cast(a); + let b: int64x2_t = simd_cast(b); + simd_add(a, b) + } } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u16)"] +#[doc = "Signed Add Long (vector, high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddl_high_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(saddl2) )] #[cfg_attr( not(target_arch = "arm"), @@ -1926,18 +2486,24 @@ pub fn vand_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vandq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe { simd_and(a, b) } +pub fn vaddl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { + unsafe { + let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + let a: int16x8_t = simd_cast(a); + let b: int16x8_t = simd_cast(b); + simd_add(a, b) + } } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u32)"] +#[doc = "Signed Add Long (vector, high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddl_high_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(uaddl2) )] #[cfg_attr( not(target_arch = "arm"), @@ -1947,18 +2513,24 @@ pub fn 
vandq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vand_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe { simd_and(a, b) } +pub fn vaddl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { + unsafe { + let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + let a: uint32x4_t = simd_cast(a); + let b: uint32x4_t = simd_cast(b); + simd_add(a, b) + } } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u32)"] +#[doc = "Signed Add Long (vector, high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddl_high_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(uaddl2) )] #[cfg_attr( not(target_arch = "arm"), @@ -1968,18 +2540,24 @@ pub fn vand_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vandq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe { simd_and(a, b) } +pub fn vaddl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { + unsafe { + let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]); + let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]); + let a: uint64x2_t = simd_cast(a); + let b: uint64x2_t = simd_cast(b); + simd_add(a, b) + } } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u64)"] +#[doc = "Signed Add Long (vector, high half)."] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddl_high_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(uaddl2) )] #[cfg_attr( not(target_arch = "arm"), @@ -1989,18 +2567,24 @@ pub fn vandq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vand_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - unsafe { simd_and(a, b) } +pub fn vaddl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { + unsafe { + let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + let a: uint16x8_t = simd_cast(a); + let b: uint16x8_t = simd_cast(b); + simd_add(a, b) + } } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u64)"] +#[doc = "Add Long (vector)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddl_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(saddl) )] #[cfg_attr( not(target_arch = "arm"), @@ -2010,62 +2594,22 @@ pub fn vand_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vandq_u64(a: uint64x2_t, b: 
uint64x2_t) -> uint64x2_t { - unsafe { simd_and(a, b) } -} -#[doc = "Floating-point absolute compare greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcage_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facge) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcage_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacge.v4i16.v4f16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.facge.v4i16.v4f16" - )] - fn _vcage_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t; - } - unsafe { _vcage_f16(a, b) } -} -#[doc = "Floating-point absolute compare greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facge) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcageq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacge.v8i16.v8f16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.facge.v8i16.v8f16" - )] - fn _vcageq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t; +pub fn vaddl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { + unsafe { + let a: 
int32x4_t = simd_cast(a); + let b: int32x4_t = simd_cast(b); + simd_add(a, b) } - unsafe { _vcageq_f16(a, b) } } -#[doc = "Floating-point absolute compare greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcage_f32)"] +#[doc = "Add Long (vector)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddl_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facge) + assert_instr(saddl) )] #[cfg_attr( not(target_arch = "arm"), @@ -2075,26 +2619,22 @@ pub fn vcageq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcage_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacge.v2i32.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.facge.v2i32.v2f32" - )] - fn _vcage_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t; +pub fn vaddl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { + unsafe { + let a: int64x2_t = simd_cast(a); + let b: int64x2_t = simd_cast(b); + simd_add(a, b) } - unsafe { _vcage_f32(a, b) } } -#[doc = "Floating-point absolute compare greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f32)"] +#[doc = "Add Long (vector)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddl_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facge) + assert_instr(saddl) )] #[cfg_attr( not(target_arch = "arm"), @@ -2104,70 +2644,47 @@ pub fn vcage_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcageq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacge.v4i32.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.facge.v4i32.v4f32" - )] - fn _vcageq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t; +pub fn vaddl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { + unsafe { + let a: int16x8_t = simd_cast(a); + let b: int16x8_t = simd_cast(b); + simd_add(a, b) } - unsafe { _vcageq_f32(a, b) } } -#[doc = "Floating-point absolute compare greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagt_f16)"] +#[doc = "Add Long (vector)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddl_u16)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f16"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facgt) + assert_instr(uaddl) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcagt_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { - 
unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacgt.v4i16.v4f16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.facgt.v4i16.v4f16" - )] - fn _vcagt_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t; - } - unsafe { _vcagt_f16(a, b) } -} -#[doc = "Floating-point absolute compare greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f16"))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facgt) + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcagtq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacgt.v8i16.v8f16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.facgt.v8i16.v8f16" - )] - fn _vcagtq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t; +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vaddl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { + unsafe { + let a: uint32x4_t = simd_cast(a); + let b: uint32x4_t = simd_cast(b); + simd_add(a, b) } - unsafe { _vcagtq_f16(a, b) } } -#[doc = "Floating-point absolute compare greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagt_f32)"] +#[doc = "Add Long (vector)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddl_u32)"] #[inline] #[target_feature(enable = "neon")] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facgt) + assert_instr(uaddl) )] #[cfg_attr( not(target_arch = "arm"), @@ -2177,26 +2694,22 @@ pub fn vcagtq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcagt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacgt.v2i32.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.facgt.v2i32.v2f32" - )] - fn _vcagt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t; +pub fn vaddl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { + unsafe { + let a: uint64x2_t = simd_cast(a); + let b: uint64x2_t = simd_cast(b); + simd_add(a, b) } - unsafe { _vcagt_f32(a, b) } } -#[doc = "Floating-point absolute compare greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f32)"] +#[doc = "Add Long (vector)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddl_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facgt) + assert_instr(uaddl) )] #[cfg_attr( not(target_arch = "arm"), @@ -2206,54 +2719,22 @@ pub fn vcagt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", 
issue = "111800") )] -pub fn vcagtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacgt.v4i32.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.facgt.v4i32.v4f32" - )] - fn _vcagtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t; +pub fn vaddl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { + unsafe { + let a: uint16x8_t = simd_cast(a); + let b: uint16x8_t = simd_cast(b); + simd_add(a, b) } - unsafe { _vcagtq_f32(a, b) } -} -#[doc = "Floating-point absolute compare less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcale_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facge) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcale_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { - vcage_f16(b, a) -} -#[doc = "Floating-point absolute compare less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleq_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facge) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcaleq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { - vcageq_f16(b, a) } -#[doc = "Floating-point absolute compare less than or equal"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcale_f32)"] +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p128)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facge) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -2263,18 +2744,18 @@ pub fn vcaleq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcale_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - vcage_f32(b, a) +pub fn vaddq_p128(a: p128, b: p128) -> p128 { + a ^ b } -#[doc = "Floating-point absolute compare less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleq_f32)"] +#[doc = "Add Wide (high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddw_high_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facge) + assert_instr(saddw2) )] #[cfg_attr( not(target_arch = "arm"), @@ -2284,46 +2765,22 @@ pub fn vcale_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcaleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - vcageq_f32(b, a) -} -#[doc = 
"Floating-point absolute compare less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalt_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facgt) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcalt_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { - vcagt_f16(b, a) -} -#[doc = "Floating-point absolute compare less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltq_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facgt) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcaltq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { - vcagtq_f16(b, a) +pub fn vaddw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { + unsafe { + let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + let b: int32x4_t = simd_cast(b); + simd_add(a, b) + } } -#[doc = "Floating-point absolute compare less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalt_f32)"] +#[doc = "Add Wide (high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddw_high_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] #[cfg_attr( all(test, 
any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facgt) + assert_instr(saddw2) )] #[cfg_attr( not(target_arch = "arm"), @@ -2333,18 +2790,22 @@ pub fn vcaltq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcalt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - vcagt_f32(b, a) +pub fn vaddw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { + unsafe { + let b: int32x2_t = simd_shuffle!(b, b, [2, 3]); + let b: int64x2_t = simd_cast(b); + simd_add(a, b) + } } -#[doc = "Floating-point absolute compare less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltq_f32)"] +#[doc = "Add Wide (high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddw_high_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facgt) + assert_instr(saddw2) )] #[cfg_attr( not(target_arch = "arm"), @@ -2354,46 +2815,47 @@ pub fn vcalt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcaltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - vcagtq_f32(b, a) +pub fn vaddw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { + unsafe { + let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + let b: int16x8_t = simd_cast(b); + simd_add(a, b) + } } -#[doc = "Floating-point compare equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_f16)"] +#[doc = "Add Wide (high half)."] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddw_high_u16)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.f16"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmeq) + assert_instr(uaddw2) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vceq_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { - unsafe { simd_eq(a, b) } -} -#[doc = "Floating-point compare equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.f16"))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmeq) + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vceqq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { - unsafe { simd_eq(a, b) } +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vaddw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { + unsafe { + let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + let b: uint32x4_t = simd_cast(b); + simd_add(a, b) + } } -#[doc = "Floating-point compare equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_f32)"] +#[doc = "Add Wide (high half)."] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddw_high_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmeq) + assert_instr(uaddw2) )] #[cfg_attr( not(target_arch = "arm"), @@ -2403,18 +2865,22 @@ pub fn vceqq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vceq_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - unsafe { simd_eq(a, b) } +pub fn vaddw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { + unsafe { + let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]); + let b: uint64x2_t = simd_cast(b); + simd_add(a, b) + } } -#[doc = "Floating-point compare equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_f32)"] +#[doc = "Add Wide (high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddw_high_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmeq) + assert_instr(uaddw2) )] #[cfg_attr( not(target_arch = "arm"), @@ -2424,18 +2890,22 @@ pub fn vceq_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vceqq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - unsafe { simd_eq(a, b) } +pub fn vaddw_high_u8(a: uint16x8_t, 
b: uint8x16_t) -> uint16x8_t { + unsafe { + let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint16x8_t = simd_cast(b); + simd_add(a, b) + } } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s8)"] +#[doc = "Add Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddw_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(saddw) )] #[cfg_attr( not(target_arch = "arm"), @@ -2445,18 +2915,21 @@ pub fn vceqq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vceq_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { - unsafe { simd_eq(a, b) } +pub fn vaddw_s16(a: int32x4_t, b: int16x4_t) -> int32x4_t { + unsafe { + let b: int32x4_t = simd_cast(b); + simd_add(a, b) + } } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s8)"] +#[doc = "Add Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddw_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(saddw) )] #[cfg_attr( not(target_arch = "arm"), @@ -2466,18 +2939,21 @@ pub fn 
vceq_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vceqq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { - unsafe { simd_eq(a, b) } +pub fn vaddw_s32(a: int64x2_t, b: int32x2_t) -> int64x2_t { + unsafe { + let b: int64x2_t = simd_cast(b); + simd_add(a, b) + } } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s16)"] +#[doc = "Add Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddw_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(saddw) )] #[cfg_attr( not(target_arch = "arm"), @@ -2487,18 +2963,21 @@ pub fn vceqq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vceq_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { - unsafe { simd_eq(a, b) } +pub fn vaddw_s8(a: int16x8_t, b: int8x8_t) -> int16x8_t { + unsafe { + let b: int16x8_t = simd_cast(b); + simd_add(a, b) + } } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s16)"] +#[doc = "Add Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddw_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] #[cfg_attr( 
all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(uaddw) )] #[cfg_attr( not(target_arch = "arm"), @@ -2508,18 +2987,21 @@ pub fn vceq_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vceqq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { - unsafe { simd_eq(a, b) } +pub fn vaddw_u16(a: uint32x4_t, b: uint16x4_t) -> uint32x4_t { + unsafe { + let b: uint32x4_t = simd_cast(b); + simd_add(a, b) + } } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s32)"] +#[doc = "Add Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddw_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(uaddw) )] #[cfg_attr( not(target_arch = "arm"), @@ -2529,18 +3011,21 @@ pub fn vceqq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vceq_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { - unsafe { simd_eq(a, b) } +pub fn vaddw_u32(a: uint64x2_t, b: uint32x2_t) -> uint64x2_t { + unsafe { + let b: uint64x2_t = simd_cast(b); + simd_add(a, b) + } } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s32)"] +#[doc = "Add Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddw_u8)"] #[inline] #[target_feature(enable = "neon")] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(uaddw) )] #[cfg_attr( not(target_arch = "arm"), @@ -2550,60 +3035,121 @@ pub fn vceq_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vceqq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { - unsafe { simd_eq(a, b) } +pub fn vaddw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t { + unsafe { + let b: uint16x8_t = simd_cast(b); + simd_add(a, b) + } } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u8)"] +#[doc = "AES single round encryption."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesdq_u8)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] +#[target_feature(enable = "aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(aesd))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] +pub fn vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.aesd" + )] + #[cfg_attr(target_arch = 
"arm", link_name = "llvm.arm.neon.aesd")] + fn _vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t; + } + unsafe { _vaesdq_u8(data, key) } +} +#[doc = "AES single round encryption."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaeseq_u8)"] +#[inline] +#[target_feature(enable = "aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(aese))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vceq_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe { simd_eq(a, b) } +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub fn vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.aese" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aese")] + fn _vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t; + } + unsafe { _vaeseq_u8(data, key) } } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u8)"] +#[doc = "AES inverse mix columns."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesimcq_u8)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] +#[target_feature(enable = "aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(aesimc))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] #[cfg_attr( 
not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] +pub fn vaesimcq_u8(data: uint8x16_t) -> uint8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.aesimc" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesimc")] + fn _vaesimcq_u8(data: uint8x16_t) -> uint8x16_t; + } + unsafe { _vaesimcq_u8(data) } +} +#[doc = "AES mix columns."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesmcq_u8)"] +#[inline] +#[target_feature(enable = "aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(aesmc))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vceqq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe { simd_eq(a, b) } +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.aesmc" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesmc")] + fn _vaesmcq_u8(data: uint8x16_t) -> uint8x16_t; + } + unsafe { _vaesmcq_u8(data) } } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u16)"] +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] +#[cfg_attr(all(test, target_arch = 
"arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -2613,18 +3159,18 @@ pub fn vceqq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vceq_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe { simd_eq(a, b) } +pub fn vand_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { simd_and(a, b) } } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u16)"] +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -2634,18 +3180,18 @@ pub fn vceq_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vceqq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe { simd_eq(a, b) } +pub fn vandq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { simd_and(a, b) } } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u32)"] +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -2655,18 +3201,18 @@ pub fn vceqq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vceq_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe { simd_eq(a, b) } +pub fn vand_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { simd_and(a, b) } } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u32)"] +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -2676,18 +3222,18 @@ pub fn vceq_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vceqq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe { simd_eq(a, b) } +pub fn vandq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { simd_and(a, b) } } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_p8)"] +#[doc = "Vector bitwise and"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -2697,18 +3243,18 @@ pub fn vceqq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vceq_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { - unsafe { simd_eq(a, b) } +pub fn vand_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { simd_and(a, b) } } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_p8)"] +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -2718,46 +3264,39 @@ pub fn vceq_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vceqq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { - unsafe { simd_eq(a, b) } +pub fn vandq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { simd_and(a, b) } } -#[doc = "Floating-point compare greater than or equal"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_f16)"] +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s64)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f16"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmge) + assert_instr(and) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcge_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { - unsafe { simd_ge(a, b) } -} -#[doc = "Floating-point compare greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f16"))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmge) + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcgeq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { - unsafe { simd_ge(a, b) } +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vand_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + unsafe { simd_and(a, b) } } -#[doc = "Floating-point compare greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_f32)"] +#[doc = "Vector bitwise and"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmge) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -2767,18 +3306,18 @@ pub fn vcgeq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcge_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - unsafe { simd_ge(a, b) } +pub fn vandq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe { simd_and(a, b) } } -#[doc = "Floating-point compare greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_f32)"] +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmge) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -2788,18 +3327,18 @@ pub fn vcge_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcgeq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - unsafe { simd_ge(a, b) } +pub fn vand_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { simd_and(a, b) } } -#[doc = "Compare signed greater than or equal"] 
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s8)"] +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -2809,18 +3348,18 @@ pub fn vcgeq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcge_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { - unsafe { simd_ge(a, b) } +pub fn vandq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { simd_and(a, b) } } -#[doc = "Compare signed greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s8)"] +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -2830,18 +3369,18 @@ pub fn vcge_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcgeq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { - unsafe { simd_ge(a, b) } +pub fn 
vand_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { simd_and(a, b) } } -#[doc = "Compare signed greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s16)"] +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -2851,18 +3390,18 @@ pub fn vcgeq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcge_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { - unsafe { simd_ge(a, b) } +pub fn vandq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { simd_and(a, b) } } -#[doc = "Compare signed greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s16)"] +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -2872,18 +3411,18 @@ pub fn vcge_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcgeq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { - unsafe { simd_ge(a, b) } +pub fn vand_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { simd_and(a, b) } } -#[doc = "Compare signed greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s32)"] +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -2893,18 +3432,18 @@ pub fn vcgeq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcge_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { - unsafe { simd_ge(a, b) } +pub fn vandq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { simd_and(a, b) } } -#[doc = "Compare signed greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s32)"] +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(and) )] #[cfg_attr( 
not(target_arch = "arm"), @@ -2914,18 +3453,18 @@ pub fn vcge_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcgeq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { - unsafe { simd_ge(a, b) } +pub fn vand_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + unsafe { simd_and(a, b) } } -#[doc = "Compare unsigned greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u8)"] +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -2935,18 +3474,18 @@ pub fn vcgeq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcge_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe { simd_ge(a, b) } +pub fn vandq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { simd_and(a, b) } } -#[doc = "Compare unsigned greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u8)"] +#[doc = "Vector bitwise bit clear."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbic_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vbic))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(bic) )] #[cfg_attr( not(target_arch = "arm"), @@ -2956,18 +3495,19 @@ pub fn vcge_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcgeq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe { simd_ge(a, b) } +pub fn vbic_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + let c = int16x4_t::splat(-1); + unsafe { simd_and(simd_xor(b, c), a) } } -#[doc = "Compare unsigned greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u16)"] +#[doc = "Vector bitwise bit clear."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbic_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(bic) )] #[cfg_attr( not(target_arch = "arm"), @@ -2977,18 +3517,19 @@ pub fn vcgeq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcge_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe { simd_ge(a, b) } +pub fn vbic_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + let c = int32x2_t::splat(-1); + unsafe { simd_and(simd_xor(b, c), a) } } -#[doc = "Compare unsigned greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u16)"] +#[doc = "Vector bitwise bit clear."] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbic_s64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(bic) )] #[cfg_attr( not(target_arch = "arm"), @@ -2998,18 +3539,19 @@ pub fn vcge_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcgeq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe { simd_ge(a, b) } +pub fn vbic_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + let c = int64x1_t::splat(-1); + unsafe { simd_and(simd_xor(b, c), a) } } -#[doc = "Compare unsigned greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u32)"] +#[doc = "Vector bitwise bit clear."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbic_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(bic) )] #[cfg_attr( not(target_arch = "arm"), @@ -3019,18 +3561,19 @@ pub fn vcgeq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcge_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe { simd_ge(a, b) } +pub fn vbic_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + let c = int8x8_t::splat(-1); + unsafe { 
simd_and(simd_xor(b, c), a) } } -#[doc = "Compare unsigned greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u32)"] +#[doc = "Vector bitwise bit clear."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbicq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(bic) )] #[cfg_attr( not(target_arch = "arm"), @@ -3040,76 +3583,41 @@ pub fn vcge_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcgeq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe { simd_ge(a, b) } -} -#[doc = "Floating-point compare greater than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmge) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcgez_f16(a: float16x4_t) -> uint16x4_t { - let b: f16x4 = f16x4::new(0.0, 0.0, 0.0, 0.0); - unsafe { simd_ge(a, transmute(b)) } +pub fn vbicq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + let c = int16x8_t::splat(-1); + unsafe { simd_and(simd_xor(b, c), a) } } -#[doc = "Floating-point compare greater than or equal to zero"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f16)"] +#[doc = "Vector bitwise bit clear."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbicq_s32)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f16"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmge) + assert_instr(bic) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcgezq_f16(a: float16x8_t) -> uint16x8_t { - let b: f16x8 = f16x8::new(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0); - unsafe { simd_ge(a, transmute(b)) } -} -#[doc = "Floating-point compare greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f16"))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmgt) + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcgt_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { - unsafe { simd_gt(a, b) } -} -#[doc = "Floating-point compare greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f16"))] #[cfg_attr( - all(test, any(target_arch = "aarch64", 
target_arch = "arm64ec")), - assert_instr(fcmgt) + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcgtq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { - unsafe { simd_gt(a, b) } +pub fn vbicq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + let c = int32x4_t::splat(-1); + unsafe { simd_and(simd_xor(b, c), a) } } -#[doc = "Floating-point compare greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_f32)"] +#[doc = "Vector bitwise bit clear."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbicq_s64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmgt) + assert_instr(bic) )] #[cfg_attr( not(target_arch = "arm"), @@ -3119,18 +3627,19 @@ pub fn vcgtq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcgt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - unsafe { simd_gt(a, b) } +pub fn vbicq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + let c = int64x2_t::splat(-1); + unsafe { simd_and(simd_xor(b, c), a) } } -#[doc = "Floating-point compare greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_f32)"] +#[doc = "Vector bitwise bit clear."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbicq_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmgt) + assert_instr(bic) )] #[cfg_attr( not(target_arch = "arm"), @@ -3140,18 +3649,19 @@ pub fn vcgt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcgtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - unsafe { simd_gt(a, b) } +pub fn vbicq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + let c = int8x16_t::splat(-1); + unsafe { simd_and(simd_xor(b, c), a) } } -#[doc = "Compare signed greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s8)"] +#[doc = "Vector bitwise bit clear."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbic_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(bic) )] #[cfg_attr( not(target_arch = "arm"), @@ -3161,18 +3671,19 @@ pub fn vcgtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcgt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { - unsafe { simd_gt(a, b) } +pub fn vbic_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + let c = int16x4_t::splat(-1); + unsafe { simd_and(simd_xor(b, transmute(c)), a) } } -#[doc = "Compare signed greater than"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s8)"] +#[doc = "Vector bitwise bit clear."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbic_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(bic) )] #[cfg_attr( not(target_arch = "arm"), @@ -3182,18 +3693,19 @@ pub fn vcgt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcgtq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { - unsafe { simd_gt(a, b) } +pub fn vbic_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + let c = int32x2_t::splat(-1); + unsafe { simd_and(simd_xor(b, transmute(c)), a) } } -#[doc = "Compare signed greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s16)"] +#[doc = "Vector bitwise bit clear."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbic_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(bic) )] #[cfg_attr( not(target_arch = "arm"), @@ -3203,18 +3715,19 @@ pub fn vcgtq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcgt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t 
{ - unsafe { simd_gt(a, b) } +pub fn vbic_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + let c = int64x1_t::splat(-1); + unsafe { simd_and(simd_xor(b, transmute(c)), a) } } -#[doc = "Compare signed greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s16)"] +#[doc = "Vector bitwise bit clear."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbic_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(bic) )] #[cfg_attr( not(target_arch = "arm"), @@ -3224,18 +3737,19 @@ pub fn vcgt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcgtq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { - unsafe { simd_gt(a, b) } +pub fn vbic_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + let c = int8x8_t::splat(-1); + unsafe { simd_and(simd_xor(b, transmute(c)), a) } } -#[doc = "Compare signed greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s32)"] +#[doc = "Vector bitwise bit clear."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbicq_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(bic) )] #[cfg_attr( not(target_arch = 
"arm"), @@ -3245,18 +3759,19 @@ pub fn vcgtq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcgt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { - unsafe { simd_gt(a, b) } +pub fn vbicq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + let c = int16x8_t::splat(-1); + unsafe { simd_and(simd_xor(b, transmute(c)), a) } } -#[doc = "Compare signed greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s32)"] +#[doc = "Vector bitwise bit clear."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbicq_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(bic) )] #[cfg_attr( not(target_arch = "arm"), @@ -3266,18 +3781,19 @@ pub fn vcgt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcgtq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { - unsafe { simd_gt(a, b) } +pub fn vbicq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + let c = int32x4_t::splat(-1); + unsafe { simd_and(simd_xor(b, transmute(c)), a) } } -#[doc = "Compare unsigned greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u8)"] +#[doc = "Vector bitwise bit clear."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbicq_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = 
"arm"), assert_instr("vcgt.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(bic) )] #[cfg_attr( not(target_arch = "arm"), @@ -3287,18 +3803,19 @@ pub fn vcgtq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcgt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe { simd_gt(a, b) } +pub fn vbicq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + let c = int64x2_t::splat(-1); + unsafe { simd_and(simd_xor(b, transmute(c)), a) } } -#[doc = "Compare unsigned greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u8)"] +#[doc = "Vector bitwise bit clear."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbicq_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(bic) )] #[cfg_attr( not(target_arch = "arm"), @@ -3308,18 +3825,19 @@ pub fn vcgt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcgtq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe { simd_gt(a, b) } +pub fn vbicq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + let c = int8x16_t::splat(-1); + unsafe { simd_and(simd_xor(b, transmute(c)), a) } } -#[doc = "Compare unsigned greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u16)"] +#[doc = "Bitwise Select."] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbsl_f16)"] #[inline] -#[target_feature(enable = "neon")] +#[target_feature(enable = "neon,fp16")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -3329,18 +3847,24 @@ pub fn vcgtq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcgt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe { simd_gt(a, b) } +pub fn vbsl_f16(a: uint16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t { + let not = int16x4_t::splat(-1); + unsafe { + transmute(simd_or( + simd_and(a, transmute(b)), + simd_and(simd_xor(a, transmute(not)), transmute(c)), + )) + } } -#[doc = "Compare unsigned greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u16)"] +#[doc = "Bitwise Select."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbslq_f16)"] #[inline] -#[target_feature(enable = "neon")] +#[target_feature(enable = "neon,fp16")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -3350,18 +3874,24 @@ pub fn vcgt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcgtq_u16(a: 
uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe { simd_gt(a, b) } +pub fn vbslq_f16(a: uint16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t { + let not = int16x8_t::splat(-1); + unsafe { + transmute(simd_or( + simd_and(a, transmute(b)), + simd_and(simd_xor(a, transmute(not)), transmute(c)), + )) + } } -#[doc = "Compare unsigned greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u32)"] +#[doc = "Bitwise Select."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbsl_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -3371,18 +3901,24 @@ pub fn vcgtq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcgt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe { simd_gt(a, b) } +pub fn vbsl_f32(a: uint32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { + let not = int32x2_t::splat(-1); + unsafe { + transmute(simd_or( + simd_and(a, transmute(b)), + simd_and(simd_xor(a, transmute(not)), transmute(c)), + )) + } } -#[doc = "Compare unsigned greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u32)"] +#[doc = "Bitwise Select."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbsl_p16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] 
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -3392,76 +3928,51 @@ pub fn vcgt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcgtq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe { simd_gt(a, b) } -} -#[doc = "Floating-point compare greater than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmgt) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcgtz_f16(a: float16x4_t) -> uint16x4_t { - let b: f16x4 = f16x4::new(0.0, 0.0, 0.0, 0.0); - unsafe { simd_gt(a, transmute(b)) } +pub fn vbsl_p16(a: uint16x4_t, b: poly16x4_t, c: poly16x4_t) -> poly16x4_t { + let not = int16x4_t::splat(-1); + unsafe { + transmute(simd_or( + simd_and(a, transmute(b)), + simd_and(simd_xor(a, transmute(not)), transmute(c)), + )) + } } -#[doc = "Floating-point compare greater than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f16)"] +#[doc = "Bitwise Select."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbsl_p8)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f16"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmgt) + assert_instr(bsl) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcgtzq_f16(a: float16x8_t) -> uint16x8_t { - let b: f16x8 = f16x8::new(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0); - unsafe { simd_gt(a, transmute(b)) } -} -#[doc = "Floating-point compare less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f16"))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmge) + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcle_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { - unsafe { simd_le(a, b) } -} -#[doc = "Floating-point compare less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f16"))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmge) + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcleq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { - unsafe { simd_le(a, b) } +pub fn vbsl_p8(a: uint8x8_t, b: poly8x8_t, c: poly8x8_t) -> poly8x8_t { + let not = int8x8_t::splat(-1); + unsafe { + transmute(simd_or( + simd_and(a, transmute(b)), + 
simd_and(simd_xor(a, transmute(not)), transmute(c)), + )) + } } -#[doc = "Floating-point compare less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_f32)"] +#[doc = "Bitwise Select."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbsl_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmge) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -3471,18 +3982,24 @@ pub fn vcleq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcle_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - unsafe { simd_le(a, b) } +pub fn vbsl_s16(a: uint16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { + let not = int16x4_t::splat(-1); + unsafe { + transmute(simd_or( + simd_and(a, transmute(b)), + simd_and(simd_xor(a, transmute(not)), transmute(c)), + )) + } } -#[doc = "Floating-point compare less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_f32)"] +#[doc = "Bitwise Select."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbsl_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmge) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), 
@@ -3492,18 +4009,24 @@ pub fn vcle_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - unsafe { simd_le(a, b) } +pub fn vbsl_s32(a: uint32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { + let not = int32x2_t::splat(-1); + unsafe { + transmute(simd_or( + simd_and(a, transmute(b)), + simd_and(simd_xor(a, transmute(not)), transmute(c)), + )) + } } -#[doc = "Compare signed less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s8)"] +#[doc = "Bitwise Select."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbsl_s64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -3513,18 +4036,24 @@ pub fn vcleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcle_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { - unsafe { simd_le(a, b) } +pub fn vbsl_s64(a: uint64x1_t, b: int64x1_t, c: int64x1_t) -> int64x1_t { + let not = int64x1_t::splat(-1); + unsafe { + transmute(simd_or( + simd_and(a, transmute(b)), + simd_and(simd_xor(a, transmute(not)), transmute(c)), + )) + } } -#[doc = "Compare signed less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s8)"] +#[doc = "Bitwise Select."] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbsl_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -3534,18 +4063,24 @@ pub fn vcle_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcleq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { - unsafe { simd_le(a, b) } +pub fn vbsl_s8(a: uint8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { + let not = int8x8_t::splat(-1); + unsafe { + transmute(simd_or( + simd_and(a, transmute(b)), + simd_and(simd_xor(a, transmute(not)), transmute(c)), + )) + } } -#[doc = "Compare signed less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s16)"] +#[doc = "Bitwise Select."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbslq_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -3555,18 +4090,24 @@ pub fn vcleq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcle_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { - unsafe { simd_le(a, b) } +pub fn vbslq_f32(a: uint32x4_t, b: 
float32x4_t, c: float32x4_t) -> float32x4_t { + let not = int32x4_t::splat(-1); + unsafe { + transmute(simd_or( + simd_and(a, transmute(b)), + simd_and(simd_xor(a, transmute(not)), transmute(c)), + )) + } } -#[doc = "Compare signed less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s16)"] +#[doc = "Bitwise Select."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbslq_p16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -3576,18 +4117,24 @@ pub fn vcle_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcleq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { - unsafe { simd_le(a, b) } +pub fn vbslq_p16(a: uint16x8_t, b: poly16x8_t, c: poly16x8_t) -> poly16x8_t { + let not = int16x8_t::splat(-1); + unsafe { + transmute(simd_or( + simd_and(a, transmute(b)), + simd_and(simd_xor(a, transmute(not)), transmute(c)), + )) + } } -#[doc = "Compare signed less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s32)"] +#[doc = "Bitwise Select."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbslq_p8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = 
"aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -3597,18 +4144,24 @@ pub fn vcleq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcle_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { - unsafe { simd_le(a, b) } +pub fn vbslq_p8(a: uint8x16_t, b: poly8x16_t, c: poly8x16_t) -> poly8x16_t { + let not = int8x16_t::splat(-1); + unsafe { + transmute(simd_or( + simd_and(a, transmute(b)), + simd_and(simd_xor(a, transmute(not)), transmute(c)), + )) + } } -#[doc = "Compare signed less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s32)"] +#[doc = "Bitwise Select."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbslq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -3618,18 +4171,24 @@ pub fn vcle_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcleq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { - unsafe { simd_le(a, b) } +pub fn vbslq_s16(a: uint16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { + let not = int16x8_t::splat(-1); + unsafe { + transmute(simd_or( + simd_and(a, transmute(b)), + simd_and(simd_xor(a, transmute(not)), transmute(c)), + )) + } } -#[doc = "Compare unsigned less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u8)"] +#[doc = 
"Bitwise Select."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbslq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -3639,18 +4198,24 @@ pub fn vcleq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcle_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe { simd_le(a, b) } +pub fn vbslq_s32(a: uint32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { + let not = int32x4_t::splat(-1); + unsafe { + transmute(simd_or( + simd_and(a, transmute(b)), + simd_and(simd_xor(a, transmute(not)), transmute(c)), + )) + } } -#[doc = "Compare unsigned less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u8)"] +#[doc = "Bitwise Select."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbslq_s64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -3660,18 +4225,24 @@ pub fn vcle_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcleq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe { simd_le(a, 
b) } +pub fn vbslq_s64(a: uint64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { + let not = int64x2_t::splat(-1); + unsafe { + transmute(simd_or( + simd_and(a, transmute(b)), + simd_and(simd_xor(a, transmute(not)), transmute(c)), + )) + } } -#[doc = "Compare unsigned less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u16)"] +#[doc = "Bitwise Select."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbslq_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -3681,18 +4252,24 @@ pub fn vcleq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcle_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe { simd_le(a, b) } +pub fn vbslq_s8(a: uint8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { + let not = int8x16_t::splat(-1); + unsafe { + transmute(simd_or( + simd_and(a, transmute(b)), + simd_and(simd_xor(a, transmute(not)), transmute(c)), + )) + } } -#[doc = "Compare unsigned less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u16)"] +#[doc = "Bitwise Select."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbsl_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] 
#[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -3702,18 +4279,24 @@ pub fn vcle_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcleq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe { simd_le(a, b) } +pub fn vbsl_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { + let not = int16x4_t::splat(-1); + unsafe { + transmute(simd_or( + simd_and(a, b), + simd_and(simd_xor(a, transmute(not)), c), + )) + } } -#[doc = "Compare unsigned less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u32)"] +#[doc = "Bitwise Select."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbsl_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -3723,18 +4306,24 @@ pub fn vcleq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcle_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe { simd_le(a, b) } +pub fn vbsl_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { + let not = int32x2_t::splat(-1); + unsafe { + transmute(simd_or( + simd_and(a, b), + simd_and(simd_xor(a, transmute(not)), c), + )) + } } -#[doc = "Compare unsigned less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u32)"] 
+#[doc = "Bitwise Select."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbsl_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -3744,48 +4333,51 @@ pub fn vcle_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcleq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe { simd_le(a, b) } +pub fn vbsl_u64(a: uint64x1_t, b: uint64x1_t, c: uint64x1_t) -> uint64x1_t { + let not = int64x1_t::splat(-1); + unsafe { + transmute(simd_or( + simd_and(a, b), + simd_and(simd_xor(a, transmute(not)), c), + )) + } } -#[doc = "Floating-point compare less than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f16)"] +#[doc = "Bitwise Select."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbsl_u8)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcle.f16"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmle) + assert_instr(bsl) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vclez_f16(a: float16x4_t) -> uint16x4_t { - let b: f16x4 = f16x4::new(0.0, 0.0, 0.0, 0.0); - unsafe { 
simd_le(a, transmute(b)) } -} -#[doc = "Floating-point compare less than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcle.f16"))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmle) + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vclezq_f16(a: float16x8_t) -> uint16x8_t { - let b: f16x8 = f16x8::new(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0); - unsafe { simd_le(a, transmute(b)) } +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vbsl_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { + let not = int8x8_t::splat(-1); + unsafe { + transmute(simd_or( + simd_and(a, b), + simd_and(simd_xor(a, transmute(not)), c), + )) + } } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_s8)"] +#[doc = "Bitwise Select."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbslq_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -3795,26 +4387,24 @@ pub fn vclezq_f16(a: float16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcls_s8(a: 
int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.cls.v8i8" - )] - fn _vcls_s8(a: int8x8_t) -> int8x8_t; +pub fn vbslq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { + let not = int16x8_t::splat(-1); + unsafe { + transmute(simd_or( + simd_and(a, b), + simd_and(simd_xor(a, transmute(not)), c), + )) } - unsafe { _vcls_s8(a) } } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_s8)"] +#[doc = "Bitwise Select."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbslq_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -3824,26 +4414,24 @@ pub fn vcls_s8(a: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vclsq_s8(a: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.cls.v16i8" - )] - fn _vclsq_s8(a: int8x16_t) -> int8x16_t; +pub fn vbslq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { + let not = int32x4_t::splat(-1); + unsafe { + transmute(simd_or( + simd_and(a, b), + simd_and(simd_xor(a, transmute(not)), c), + )) } - unsafe { _vclsq_s8(a) } } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_s16)"] +#[doc = "Bitwise Select."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbslq_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -3853,26 +4441,24 @@ pub fn vclsq_s8(a: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcls_s16(a: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.cls.v4i16" - )] - fn _vcls_s16(a: int16x4_t) -> int16x4_t; +pub fn vbslq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { + let not = int64x2_t::splat(-1); + unsafe { + transmute(simd_or( + simd_and(a, b), + simd_and(simd_xor(a, transmute(not)), c), + )) } - unsafe { _vcls_s16(a) } } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_s16)"] +#[doc = "Bitwise Select."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbslq_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + 
assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -3882,26 +4468,68 @@ pub fn vcls_s16(a: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vclsq_s16(a: int16x8_t) -> int16x8_t { +pub fn vbslq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { + let not = int8x16_t::splat(-1); + unsafe { + transmute(simd_or( + simd_and(a, b), + simd_and(simd_xor(a, transmute(not)), c), + )) + } +} +#[doc = "Floating-point absolute compare greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcage_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(facge) +)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vcage_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v8i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacge.v4i16.v4f16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.cls.v8i16" + link_name = "llvm.aarch64.neon.facge.v4i16.v4f16" )] - fn _vclsq_s16(a: int16x8_t) -> int16x8_t; + fn _vcage_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t; } - unsafe { _vclsq_s16(a) } + unsafe { _vcage_f16(a, b) } } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_s32)"] +#[doc = "Floating-point absolute compare greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", 
target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(facge) +)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vcageq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacge.v8i16.v8f16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.facge.v8i16.v8f16" + )] + fn _vcageq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t; + } + unsafe { _vcageq_f16(a, b) } +} +#[doc = "Floating-point absolute compare greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcage_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(facge) )] #[cfg_attr( not(target_arch = "arm"), @@ -3911,26 +4539,26 @@ pub fn vclsq_s16(a: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcls_s32(a: int32x2_t) -> int32x2_t { +pub fn vcage_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v2i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacge.v2i32.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.cls.v2i32" + link_name = "llvm.aarch64.neon.facge.v2i32.v2f32" )] - fn _vcls_s32(a: int32x2_t) -> int32x2_t; + fn 
_vcage_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t; } - unsafe { _vcls_s32(a) } + unsafe { _vcage_f32(a, b) } } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_s32)"] +#[doc = "Floating-point absolute compare greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(facge) )] #[cfg_attr( not(target_arch = "arm"), @@ -3940,47 +4568,70 @@ pub fn vcls_s32(a: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vclsq_s32(a: int32x4_t) -> int32x4_t { +pub fn vcageq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v4i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacge.v4i32.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.cls.v4i32" + link_name = "llvm.aarch64.neon.facge.v4i32.v4f32" )] - fn _vclsq_s32(a: int32x4_t) -> int32x4_t; + fn _vcageq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t; } - unsafe { _vclsq_s32(a) } + unsafe { _vcageq_f32(a, b) } } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_u8)"] +#[doc = "Floating-point absolute compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagt_f16)"] #[inline] 
-#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + assert_instr(facgt) )] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vcagt_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacgt.v4i16.v4f16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.facgt.v4i16.v4f16" + )] + fn _vcagt_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t; + } + unsafe { _vcagt_f16(a, b) } +} +#[doc = "Floating-point absolute compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f16"))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(facgt) )] -pub fn vcls_u8(a: uint8x8_t) -> int8x8_t { - unsafe { vcls_s8(transmute(a)) } +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vcagtq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacgt.v8i16.v8f16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = 
"llvm.aarch64.neon.facgt.v8i16.v8f16" + )] + fn _vcagtq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t; + } + unsafe { _vcagtq_f16(a, b) } } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_u8)"] +#[doc = "Floating-point absolute compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagt_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(facgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -3990,18 +4641,26 @@ pub fn vcls_u8(a: uint8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vclsq_u8(a: uint8x16_t) -> int8x16_t { - unsafe { vclsq_s8(transmute(a)) } +pub fn vcagt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacgt.v2i32.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.facgt.v2i32.v2f32" + )] + fn _vcagt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t; + } + unsafe { _vcagt_f32(a, b) } } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_u16)"] +#[doc = "Floating-point absolute compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vcls))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(facgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -4011,39 +4670,54 @@ pub fn vclsq_u8(a: uint8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcls_u16(a: uint16x4_t) -> int16x4_t { - unsafe { vcls_s16(transmute(a)) } +pub fn vcagtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacgt.v4i32.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.facgt.v4i32.v4f32" + )] + fn _vcagtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t; + } + unsafe { _vcagtq_f32(a, b) } } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_u16)"] +#[doc = "Floating-point absolute compare less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcale_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + assert_instr(facge) )] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vcale_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { + vcage_f16(b, a) +} +#[doc = "Floating-point absolute 
compare less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleq_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f16"))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(facge) )] -pub fn vclsq_u16(a: uint16x8_t) -> int16x8_t { - unsafe { vclsq_s16(transmute(a)) } +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vcaleq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { + vcageq_f16(b, a) } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_u32)"] +#[doc = "Floating-point absolute compare less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcale_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(facge) )] #[cfg_attr( not(target_arch = "arm"), @@ -4053,18 +4727,18 @@ pub fn vclsq_u16(a: uint16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcls_u32(a: uint32x2_t) -> int32x2_t { - unsafe { vcls_s32(transmute(a)) } +pub fn vcale_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { + vcage_f32(b, a) } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_u32)"] +#[doc = 
"Floating-point absolute compare less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleq_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(facge) )] #[cfg_attr( not(target_arch = "arm"), @@ -4074,46 +4748,46 @@ pub fn vcls_u32(a: uint32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vclsq_u32(a: uint32x4_t) -> int32x4_t { - unsafe { vclsq_s32(transmute(a)) } +pub fn vcaleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { + vcageq_f32(b, a) } -#[doc = "Floating-point compare less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_f16)"] +#[doc = "Floating-point absolute compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalt_f16)"] #[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmgt) + assert_instr(facgt) )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vclt_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { - unsafe { simd_lt(a, b) } +pub fn vcalt_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { + vcagt_f16(b, a) } -#[doc = "Floating-point compare less than"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_f16)"] +#[doc = "Floating-point absolute compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltq_f16)"] #[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmgt) + assert_instr(facgt) )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcltq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { - unsafe { simd_lt(a, b) } +pub fn vcaltq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { + vcagtq_f16(b, a) } -#[doc = "Floating-point compare less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_f32)"] +#[doc = "Floating-point absolute compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalt_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmgt) + assert_instr(facgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -4123,18 +4797,18 @@ pub fn vcltq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vclt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - unsafe { simd_lt(a, b) } +pub fn vcalt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { + vcagt_f32(b, a) } -#[doc = 
"Floating-point compare less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_f32)"] +#[doc = "Floating-point absolute compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltq_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmgt) + assert_instr(facgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -4144,18 +4818,46 @@ pub fn vclt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - unsafe { simd_lt(a, b) } +pub fn vcaltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { + vcagtq_f32(b, a) } -#[doc = "Compare signed less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s8)"] +#[doc = "Floating-point compare equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.f16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fcmeq) +)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vceq_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { + unsafe { simd_eq(a, b) } +} +#[doc = "Floating-point compare equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_f16)"] 
+#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.f16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fcmeq) +)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vceqq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { + unsafe { simd_eq(a, b) } +} +#[doc = "Floating-point compare equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(fcmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -4165,18 +4867,18 @@ pub fn vcltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vclt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { - unsafe { simd_lt(a, b) } +pub fn vceq_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { + unsafe { simd_eq(a, b) } } -#[doc = "Compare signed less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s8)"] +#[doc = "Floating-point compare equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = 
"arm64ec")), - assert_instr(cmgt) + assert_instr(fcmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -4186,18 +4888,18 @@ pub fn vclt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcltq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { - unsafe { simd_lt(a, b) } +pub fn vceqq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { + unsafe { simd_eq(a, b) } } -#[doc = "Compare signed less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s16)"] +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -4207,18 +4909,18 @@ pub fn vcltq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vclt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { - unsafe { simd_lt(a, b) } +pub fn vceq_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { + unsafe { simd_eq(a, b) } } -#[doc = "Compare signed less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s16)"] +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vcgt.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -4228,18 +4930,18 @@ pub fn vclt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcltq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { - unsafe { simd_lt(a, b) } +pub fn vceqq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { + unsafe { simd_eq(a, b) } } -#[doc = "Compare signed less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s32)"] +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -4249,18 +4951,18 @@ pub fn vcltq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vclt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { - unsafe { simd_lt(a, b) } +pub fn vceq_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { + unsafe { simd_eq(a, b) } } -#[doc = "Compare signed less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s32)"] +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s16)"] #[inline] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -4270,18 +4972,18 @@ pub fn vclt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcltq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { - unsafe { simd_lt(a, b) } +pub fn vceqq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { + unsafe { simd_eq(a, b) } } -#[doc = "Compare unsigned less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u8)"] +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -4291,18 +4993,18 @@ pub fn vcltq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vclt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe { simd_lt(a, b) } +pub fn vceq_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { + unsafe { simd_eq(a, b) } } -#[doc = "Compare unsigned less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u8)"] +#[doc = "Compare 
bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -4312,18 +5014,18 @@ pub fn vclt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcltq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe { simd_lt(a, b) } +pub fn vceqq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { + unsafe { simd_eq(a, b) } } -#[doc = "Compare unsigned less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u16)"] +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -4333,18 +5035,18 @@ pub fn vcltq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vclt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe { simd_lt(a, b) } +pub fn vceq_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { simd_eq(a, b) } } -#[doc = "Compare 
unsigned less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u16)"] +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -4354,18 +5056,18 @@ pub fn vclt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcltq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe { simd_lt(a, b) } +pub fn vceqq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { simd_eq(a, b) } } -#[doc = "Compare unsigned less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u32)"] +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -4375,18 +5077,18 @@ pub fn vcltq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vclt_u32(a: uint32x2_t, b: 
uint32x2_t) -> uint32x2_t { - unsafe { simd_lt(a, b) } +pub fn vceq_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { simd_eq(a, b) } } -#[doc = "Compare unsigned less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u32)"] +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -4396,48 +5098,18 @@ pub fn vclt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcltq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe { simd_lt(a, b) } -} -#[doc = "Floating-point compare less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclt.f16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmlt) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcltz_f16(a: float16x4_t) -> uint16x4_t { - let b: f16x4 = f16x4::new(0.0, 0.0, 0.0, 0.0); - unsafe { simd_lt(a, transmute(b)) } -} -#[doc = "Floating-point compare less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", 
target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclt.f16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmlt) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcltzq_f16(a: float16x8_t) -> uint16x8_t { - let b: f16x8 = f16x8::new(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0); - unsafe { simd_lt(a, transmute(b)) } +pub fn vceqq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { simd_eq(a, b) } } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_s8)"] +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -4447,26 +5119,18 @@ pub fn vcltzq_f16(a: float16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vclz_s8(a: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ctlz.v8i8" - )] - fn _vclz_s8(a: int8x8_t) -> int8x8_t; - } - unsafe { _vclz_s8(a) } +pub fn vceq_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { simd_eq(a, b) } } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_s8)"] 
+#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -4476,26 +5140,18 @@ pub fn vclz_s8(a: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vclzq_s8(a: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ctlz.v16i8" - )] - fn _vclzq_s8(a: int8x16_t) -> int8x16_t; - } - unsafe { _vclzq_s8(a) } +pub fn vceqq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { simd_eq(a, b) } } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_s16)"] +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_p8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -4505,26 +5161,18 @@ pub fn vclzq_s8(a: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue 
= "111800") )] -pub fn vclz_s16(a: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ctlz.v4i16" - )] - fn _vclz_s16(a: int16x4_t) -> int16x4_t; - } - unsafe { _vclz_s16(a) } +pub fn vceq_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { + unsafe { simd_eq(a, b) } } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_s16)"] +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_p8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -4534,26 +5182,46 @@ pub fn vclz_s16(a: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vclzq_s16(a: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ctlz.v8i16" - )] - fn _vclzq_s16(a: int16x8_t) -> int16x8_t; - } - unsafe { _vclzq_s16(a) } +pub fn vceqq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { + unsafe { simd_eq(a, b) } } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_s32)"] +#[doc = "Floating-point compare greater than or equal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fcmge) +)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vcge_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { + unsafe { simd_ge(a, b) } +} +#[doc = "Floating-point compare greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fcmge) +)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vcgeq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { + unsafe { simd_ge(a, b) } +} +#[doc = "Floating-point compare greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(fcmge) )] #[cfg_attr( not(target_arch = "arm"), @@ -4563,26 +5231,18 @@ pub fn vclzq_s16(a: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vclz_s32(a: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - 
#[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ctlz.v2i32" - )] - fn _vclz_s32(a: int32x2_t) -> int32x2_t; - } - unsafe { _vclz_s32(a) } +pub fn vcge_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { + unsafe { simd_ge(a, b) } } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_s32)"] +#[doc = "Floating-point compare greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(fcmge) )] #[cfg_attr( not(target_arch = "arm"), @@ -4592,27 +5252,18 @@ pub fn vclz_s32(a: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vclzq_s32(a: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ctlz.v4i32" - )] - fn _vclzq_s32(a: int32x4_t) -> int32x4_t; - } - unsafe { _vclzq_s32(a) } +pub fn vcgeq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { + unsafe { simd_ge(a, b) } } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u16)"] +#[doc = "Compare signed greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s8)"] #[inline] -#[cfg(target_endian = 
"little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(cmge) )] #[cfg_attr( not(target_arch = "arm"), @@ -4622,19 +5273,18 @@ pub fn vclzq_s32(a: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vclz_u16(a: uint16x4_t) -> uint16x4_t { - unsafe { transmute(vclz_s16(transmute(a))) } +pub fn vcge_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { + unsafe { simd_ge(a, b) } } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u16)"] +#[doc = "Compare signed greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(cmge) )] #[cfg_attr( not(target_arch = "arm"), @@ -4644,23 +5294,18 @@ pub fn vclz_u16(a: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vclz_u16(a: uint16x4_t) -> uint16x4_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: uint16x4_t = transmute(vclz_s16(transmute(a))); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vcgeq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { + unsafe { simd_ge(a, b) } } 
-#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u16)"] +#[doc = "Compare signed greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(cmge) )] #[cfg_attr( not(target_arch = "arm"), @@ -4670,19 +5315,18 @@ pub fn vclz_u16(a: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vclzq_u16(a: uint16x8_t) -> uint16x8_t { - unsafe { transmute(vclzq_s16(transmute(a))) } +pub fn vcge_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { + unsafe { simd_ge(a, b) } } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u16)"] +#[doc = "Compare signed greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(cmge) )] #[cfg_attr( not(target_arch = "arm"), @@ -4692,23 +5336,18 @@ pub fn vclzq_u16(a: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vclzq_u16(a: uint16x8_t) -> uint16x8_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint16x8_t = transmute(vclzq_s16(transmute(a))); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vcgeq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { + unsafe { simd_ge(a, b) } } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u32)"] +#[doc = "Compare signed greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(cmge) )] #[cfg_attr( not(target_arch = "arm"), @@ -4718,19 +5357,18 @@ pub fn vclzq_u16(a: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vclz_u32(a: uint32x2_t) -> uint32x2_t { - unsafe { transmute(vclz_s32(transmute(a))) } +pub fn vcge_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { + unsafe { simd_ge(a, b) } } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u32)"] +#[doc = "Compare signed greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = 
"arm"), assert_instr("vclz.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(cmge) )] #[cfg_attr( not(target_arch = "arm"), @@ -4740,23 +5378,18 @@ pub fn vclz_u32(a: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vclz_u32(a: uint32x2_t) -> uint32x2_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: uint32x2_t = transmute(vclz_s32(transmute(a))); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vcgeq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { + unsafe { simd_ge(a, b) } } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u32)"] +#[doc = "Compare unsigned greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(cmhs) )] #[cfg_attr( not(target_arch = "arm"), @@ -4766,19 +5399,18 @@ pub fn vclz_u32(a: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vclzq_u32(a: uint32x4_t) -> uint32x4_t { - unsafe { transmute(vclzq_s32(transmute(a))) } +pub fn vcge_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { simd_ge(a, b) } } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u32)"] +#[doc = "Compare unsigned greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(cmhs) )] #[cfg_attr( not(target_arch = "arm"), @@ -4788,23 +5420,18 @@ pub fn vclzq_u32(a: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vclzq_u32(a: uint32x4_t) -> uint32x4_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: uint32x4_t = transmute(vclzq_s32(transmute(a))); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vcgeq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { simd_ge(a, b) } } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u8)"] +#[doc = "Compare unsigned greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(cmhs) )] #[cfg_attr( not(target_arch = "arm"), @@ -4814,19 +5441,18 @@ pub fn vclzq_u32(a: 
uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vclz_u8(a: uint8x8_t) -> uint8x8_t { - unsafe { transmute(vclz_s8(transmute(a))) } +pub fn vcge_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { simd_ge(a, b) } } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u8)"] +#[doc = "Compare unsigned greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(cmhs) )] #[cfg_attr( not(target_arch = "arm"), @@ -4836,23 +5462,18 @@ pub fn vclz_u8(a: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vclz_u8(a: uint8x8_t) -> uint8x8_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint8x8_t = transmute(vclz_s8(transmute(a))); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vcgeq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { simd_ge(a, b) } } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u8)"] +#[doc = "Compare unsigned greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(cmhs) )] #[cfg_attr( not(target_arch = "arm"), @@ -4862,19 +5483,18 @@ pub fn vclz_u8(a: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vclzq_u8(a: uint8x16_t) -> uint8x16_t { - unsafe { transmute(vclzq_s8(transmute(a))) } +pub fn vcge_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { simd_ge(a, b) } } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u8)"] +#[doc = "Compare unsigned greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(cmhs) )] #[cfg_attr( not(target_arch = "arm"), @@ -4884,56 +5504,76 @@ pub fn vclzq_u8(a: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vclzq_u8(a: uint8x16_t) -> uint8x16_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint8x16_t = transmute(vclzq_s8(transmute(a))); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vcgeq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + 
unsafe { simd_ge(a, b) } } -#[doc = "Population count per byte."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcnt_s8)"] +#[doc = "Floating-point compare greater than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cnt) + assert_instr(fcmge) )] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vcgez_f16(a: float16x4_t) -> uint16x4_t { + let b: f16x4 = f16x4::new(0.0, 0.0, 0.0, 0.0); + unsafe { simd_ge(a, transmute(b)) } +} +#[doc = "Floating-point compare greater than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f16"))] #[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fcmge) )] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vcgezq_f16(a: float16x8_t) -> uint16x8_t { + let b: f16x8 = f16x8::new(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0); + unsafe { simd_ge(a, transmute(b)) } +} +#[doc = "Floating-point compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_f16)"] +#[inline] 
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f16"))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fcmgt) )] -pub fn vcnt_s8(a: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ctpop.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ctpop.v8i8")] - fn _vcnt_s8(a: int8x8_t) -> int8x8_t; - } - unsafe { _vcnt_s8(a) } +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vcgt_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { + unsafe { simd_gt(a, b) } } -#[doc = "Population count per byte."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcntq_s8)"] +#[doc = "Floating-point compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fcmgt) +)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vcgtq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { + unsafe { simd_gt(a, b) } +} +#[doc = "Floating-point compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] +#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vcgt.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cnt) + assert_instr(fcmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -4943,27 +5583,18 @@ pub fn vcnt_s8(a: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcntq_s8(a: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ctpop.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ctpop.v16i8")] - fn _vcntq_s8(a: int8x16_t) -> int8x16_t; - } - unsafe { _vcntq_s8(a) } +pub fn vcgt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { + unsafe { simd_gt(a, b) } } -#[doc = "Population count per byte."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcnt_u8)"] +#[doc = "Floating-point compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_f32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cnt) + assert_instr(fcmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -4973,19 +5604,18 @@ pub fn vcntq_s8(a: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcnt_u8(a: uint8x8_t) -> uint8x8_t { - unsafe { transmute(vcnt_s8(transmute(a))) } +pub fn vcgtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { + unsafe { simd_gt(a, b) } } -#[doc = "Population count per byte."] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcnt_u8)"] +#[doc = "Compare signed greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cnt) + assert_instr(cmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -4995,23 +5625,18 @@ pub fn vcnt_u8(a: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcnt_u8(a: uint8x8_t) -> uint8x8_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint8x8_t = transmute(vcnt_s8(transmute(a))); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vcgt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { + unsafe { simd_gt(a, b) } } -#[doc = "Population count per byte."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcntq_u8)"] +#[doc = "Compare signed greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cnt) + assert_instr(cmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -5021,19 +5646,18 @@ pub fn vcnt_u8(a: uint8x8_t) -> uint8x8_t { 
target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcntq_u8(a: uint8x16_t) -> uint8x16_t { - unsafe { transmute(vcntq_s8(transmute(a))) } +pub fn vcgtq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { + unsafe { simd_gt(a, b) } } -#[doc = "Population count per byte."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcntq_u8)"] +#[doc = "Compare signed greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cnt) + assert_instr(cmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -5043,28 +5667,18 @@ pub fn vcntq_u8(a: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcntq_u8(a: uint8x16_t) -> uint8x16_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint8x16_t = transmute(vcntq_s8(transmute(a))); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vcgt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { + unsafe { simd_gt(a, b) } } -#[doc = "Population count per byte."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcnt_p8)"] +#[doc = "Compare signed greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cnt) + assert_instr(cmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -5074,19 +5688,18 @@ pub fn vcntq_u8(a: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcnt_p8(a: poly8x8_t) -> poly8x8_t { - unsafe { transmute(vcnt_s8(transmute(a))) } +pub fn vcgtq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { + unsafe { simd_gt(a, b) } } -#[doc = "Population count per byte."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcnt_p8)"] +#[doc = "Compare signed greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cnt) + assert_instr(cmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -5096,23 +5709,18 @@ pub fn vcnt_p8(a: poly8x8_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcnt_p8(a: poly8x8_t) -> poly8x8_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly8x8_t = transmute(vcnt_s8(transmute(a))); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vcgt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { + unsafe { simd_gt(a, b) } } -#[doc = "Population count per byte."] -#[doc 
= "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcntq_p8)"] +#[doc = "Compare signed greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cnt) + assert_instr(cmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -5122,19 +5730,18 @@ pub fn vcnt_p8(a: poly8x8_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcntq_p8(a: poly8x16_t) -> poly8x16_t { - unsafe { transmute(vcntq_s8(transmute(a))) } +pub fn vcgtq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { + unsafe { simd_gt(a, b) } } -#[doc = "Population count per byte."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcntq_p8)"] +#[doc = "Compare unsigned greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cnt) + assert_instr(cmhi) )] #[cfg_attr( not(target_arch = "arm"), @@ -5144,34 +5751,19 @@ pub fn vcntq_p8(a: poly8x16_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcntq_p8(a: poly8x16_t) -> poly8x16_t { - let 
a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly8x16_t = transmute(vcntq_s8(transmute(a))); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } -} -#[doc = "Join two smaller vectors into a single larger vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vcombine_f16(a: float16x4_t, b: float16x4_t) -> float16x8_t { - unsafe { simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]) } +pub fn vcgt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { simd_gt(a, b) } } -#[doc = "Join two smaller vectors into a single larger vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_f32)"] +#[doc = "Compare unsigned greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(cmhi) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5180,15 +5772,19 @@ pub fn vcombine_f16(a: float16x4_t, b: float16x4_t) -> float16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcombine_f32(a: float32x2_t, b: float32x2_t) -> float32x4_t { - unsafe { simd_shuffle!(a, b, [0, 1, 2, 3]) } +pub fn vcgtq_u8(a: 
uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { simd_gt(a, b) } } -#[doc = "Join two smaller vectors into a single larger vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s8)"] +#[doc = "Compare unsigned greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(cmhi) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5197,15 +5793,19 @@ pub fn vcombine_f32(a: float32x2_t, b: float32x2_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcombine_s8(a: int8x8_t, b: int8x8_t) -> int8x16_t { - unsafe { simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } +pub fn vcgt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { simd_gt(a, b) } } -#[doc = "Join two smaller vectors into a single larger vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s16)"] +#[doc = "Compare unsigned greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(cmhi) +)] #[cfg_attr( not(target_arch = "arm"), 
stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5214,15 +5814,19 @@ pub fn vcombine_s8(a: int8x8_t, b: int8x8_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcombine_s16(a: int16x4_t, b: int16x4_t) -> int16x8_t { - unsafe { simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]) } +pub fn vcgtq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { simd_gt(a, b) } } -#[doc = "Join two smaller vectors into a single larger vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s32)"] +#[doc = "Compare unsigned greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(cmhi) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5231,15 +5835,19 @@ pub fn vcombine_s16(a: int16x4_t, b: int16x4_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcombine_s32(a: int32x2_t, b: int32x2_t) -> int32x4_t { - unsafe { simd_shuffle!(a, b, [0, 1, 2, 3]) } +pub fn vcgt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { simd_gt(a, b) } } -#[doc = "Join two smaller vectors into a single larger vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s64)"] +#[doc = "Compare unsigned greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u32)"] #[inline] #[target_feature(enable = "neon")] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(cmhi) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5248,49 +5856,77 @@ pub fn vcombine_s32(a: int32x2_t, b: int32x2_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcombine_s64(a: int64x1_t, b: int64x1_t) -> int64x2_t { - unsafe { simd_shuffle!(a, b, [0, 1]) } +pub fn vcgtq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { simd_gt(a, b) } } -#[doc = "Join two smaller vectors into a single larger vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u8)"] +#[doc = "Floating-point compare greater than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f16"))] #[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fcmgt) )] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vcgtz_f16(a: float16x4_t) -> uint16x4_t { + let b: f16x4 = f16x4::new(0.0, 0.0, 0.0, 0.0); + unsafe { simd_gt(a, transmute(b)) } +} +#[doc = "Floating-point compare greater than zero"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f16"))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fcmgt) )] -pub fn vcombine_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x16_t { - unsafe { simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vcgtzq_f16(a: float16x8_t) -> uint16x8_t { + let b: f16x8 = f16x8::new(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0); + unsafe { simd_gt(a, transmute(b)) } } -#[doc = "Join two smaller vectors into a single larger vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u16)"] +#[doc = "Floating-point compare less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f16"))] #[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fcmge) )] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vcle_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { + unsafe { simd_le(a, b) } +} +#[doc = "Floating-point compare less than or equal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f16"))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fcmge) )] -pub fn vcombine_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x8_t { - unsafe { simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]) } +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vcleq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { + unsafe { simd_le(a, b) } } -#[doc = "Join two smaller vectors into a single larger vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u32)"] +#[doc = "Floating-point compare less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fcmge) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5299,15 +5935,19 @@ pub fn vcombine_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcombine_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x4_t { - unsafe { simd_shuffle!(a, b, [0, 1, 2, 3]) } +pub fn vcle_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { + unsafe { simd_le(a, b) } } -#[doc = "Join two smaller vectors into a single larger 
vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u64)"] +#[doc = "Floating-point compare less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fcmge) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5316,15 +5956,19 @@ pub fn vcombine_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcombine_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x2_t { - unsafe { simd_shuffle!(a, b, [0, 1]) } +pub fn vcleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { + unsafe { simd_le(a, b) } } -#[doc = "Join two smaller vectors into a single larger vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_p8)"] +#[doc = "Compare signed less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(cmge) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5333,15 +5977,19 @@ pub fn vcombine_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x2_t { target_arch 
= "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcombine_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x16_t { - unsafe { simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } +pub fn vcle_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { + unsafe { simd_le(a, b) } } -#[doc = "Join two smaller vectors into a single larger vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_p16)"] +#[doc = "Compare signed less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(cmge) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5350,15 +5998,19 @@ pub fn vcombine_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcombine_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x8_t { - unsafe { simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]) } +pub fn vcleq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { + unsafe { simd_le(a, b) } } -#[doc = "Join two smaller vectors into a single larger vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_p64)"] +#[doc = "Compare signed less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(cmge) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5367,52 +6019,18 @@ pub fn vcombine_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcombine_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x2_t { - unsafe { simd_shuffle!(a, b, [0, 1]) } +pub fn vcle_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { + unsafe { simd_le(a, b) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f16)"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcreate_f16(a: u64) -> float16x4_t { - unsafe { transmute(a) } -} -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f16)"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcreate_f16(a: u64) -> float16x4_t { - unsafe { - let ret_val: float16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 
0]) - } -} -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f32)"] +#[doc = "Compare signed less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(cmge) )] #[cfg_attr( not(target_arch = "arm"), @@ -5422,19 +6040,18 @@ pub fn vcreate_f16(a: u64) -> float16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcreate_f32(a: u64) -> float32x2_t { - unsafe { transmute(a) } +pub fn vcleq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { + unsafe { simd_le(a, b) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f32)"] +#[doc = "Compare signed less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(cmge) )] #[cfg_attr( not(target_arch = "arm"), @@ -5444,22 +6061,18 @@ pub fn vcreate_f32(a: u64) -> float32x2_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcreate_f32(a: u64) -> float32x2_t { - unsafe { - let ret_val: float32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vcle_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { + unsafe { simd_le(a, b) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s8)"] +#[doc = "Compare signed less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(cmge) )] #[cfg_attr( not(target_arch = "arm"), @@ -5469,19 +6082,18 @@ pub fn vcreate_f32(a: u64) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcreate_s8(a: u64) -> int8x8_t { - unsafe { transmute(a) } +pub fn vcleq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { + unsafe { simd_le(a, b) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s8)"] +#[doc = "Compare unsigned less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] #[cfg_attr( 
all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(cmhs) )] #[cfg_attr( not(target_arch = "arm"), @@ -5491,22 +6103,18 @@ pub fn vcreate_s8(a: u64) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcreate_s8(a: u64) -> int8x8_t { - unsafe { - let ret_val: int8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vcle_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { simd_le(a, b) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s16)"] +#[doc = "Compare unsigned less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(cmhs) )] #[cfg_attr( not(target_arch = "arm"), @@ -5516,19 +6124,18 @@ pub fn vcreate_s8(a: u64) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcreate_s16(a: u64) -> int16x4_t { - unsafe { transmute(a) } +pub fn vcleq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { simd_le(a, b) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s16)"] +#[doc = "Compare unsigned less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u16)"] #[inline] 
-#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(cmhs) )] #[cfg_attr( not(target_arch = "arm"), @@ -5538,22 +6145,18 @@ pub fn vcreate_s16(a: u64) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcreate_s16(a: u64) -> int16x4_t { - unsafe { - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vcle_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { simd_le(a, b) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s32)"] +#[doc = "Compare unsigned less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(cmhs) )] #[cfg_attr( not(target_arch = "arm"), @@ -5563,19 +6166,18 @@ pub fn vcreate_s16(a: u64) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcreate_s32(a: u64) -> int32x2_t { - unsafe { transmute(a) } +pub fn vcleq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { simd_le(a, b) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s32)"] +#[doc = "Compare unsigned less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(cmhs) )] #[cfg_attr( not(target_arch = "arm"), @@ -5585,21 +6187,18 @@ pub fn vcreate_s32(a: u64) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcreate_s32(a: u64) -> int32x2_t { - unsafe { - let ret_val: int32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vcle_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { simd_le(a, b) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s64)"] +#[doc = "Compare unsigned less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(cmhs) )] #[cfg_attr( not(target_arch = "arm"), @@ -5609,19 +6208,48 @@ pub fn vcreate_s32(a: u64) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn 
vcreate_s64(a: u64) -> int64x1_t { - unsafe { transmute(a) } +pub fn vcleq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { simd_le(a, b) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u8)"] +#[doc = "Floating-point compare less than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcle.f16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fcmle) +)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vclez_f16(a: float16x4_t) -> uint16x4_t { + let b: f16x4 = f16x4::new(0.0, 0.0, 0.0, 0.0); + unsafe { simd_le(a, transmute(b)) } +} +#[doc = "Floating-point compare less than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcle.f16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fcmle) +)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vclezq_f16(a: float16x8_t) -> uint16x8_t { + let b: f16x8 = f16x8::new(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0); + unsafe { simd_le(a, transmute(b)) } +} +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_s8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -5631,19 +6259,26 @@ pub fn vcreate_s64(a: u64) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcreate_u8(a: u64) -> uint8x8_t { - unsafe { transmute(a) } +pub fn vcls_s8(a: int8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.cls.v8i8" + )] + fn _vcls_s8(a: int8x8_t) -> int8x8_t; + } + unsafe { _vcls_s8(a) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u8)"] +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_s8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -5653,22 +6288,26 @@ pub fn vcreate_u8(a: u64) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcreate_u8(a: u64) -> uint8x8_t { - unsafe { - let ret_val: uint8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub fn vclsq_s8(a: int8x16_t) -> int8x16_t { + unsafe extern "unadjusted" 
{ + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.cls.v16i8" + )] + fn _vclsq_s8(a: int8x16_t) -> int8x16_t; } + unsafe { _vclsq_s8(a) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u16)"] +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_s16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -5678,19 +6317,26 @@ pub fn vcreate_u8(a: u64) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcreate_u16(a: u64) -> uint16x4_t { - unsafe { transmute(a) } +pub fn vcls_s16(a: int16x4_t) -> int16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.cls.v4i16" + )] + fn _vcls_s16(a: int16x4_t) -> int16x4_t; + } + unsafe { _vcls_s16(a) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u16)"] +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_s16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = 
"neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -5700,22 +6346,26 @@ pub fn vcreate_u16(a: u64) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcreate_u16(a: u64) -> uint16x4_t { - unsafe { - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub fn vclsq_s16(a: int16x8_t) -> int16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.cls.v8i16" + )] + fn _vclsq_s16(a: int16x8_t) -> int16x8_t; } + unsafe { _vclsq_s16(a) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u32)"] +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_s32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -5725,19 +6375,26 @@ pub fn vcreate_u16(a: u64) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcreate_u32(a: u64) -> uint32x2_t { - unsafe { 
transmute(a) } +pub fn vcls_s32(a: int32x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.cls.v2i32" + )] + fn _vcls_s32(a: int32x2_t) -> int32x2_t; + } + unsafe { _vcls_s32(a) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u32)"] +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_s32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -5747,21 +6404,26 @@ pub fn vcreate_u32(a: u64) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcreate_u32(a: u64) -> uint32x2_t { - unsafe { - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub fn vclsq_s32(a: int32x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.cls.v4i32" + )] + fn _vclsq_s32(a: int32x4_t) -> int32x4_t; } + unsafe { _vclsq_s32(a) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u64)"] +#[doc = "Count leading sign bits"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -5771,19 +6433,18 @@ pub fn vcreate_u32(a: u64) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcreate_u64(a: u64) -> uint64x1_t { - unsafe { transmute(a) } +pub fn vcls_u8(a: uint8x8_t) -> int8x8_t { + unsafe { vcls_s8(transmute(a)) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p8)"] +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_u8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -5793,19 +6454,18 @@ pub fn vcreate_u64(a: u64) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcreate_p8(a: u64) -> poly8x8_t { - unsafe { transmute(a) } +pub fn vclsq_u8(a: uint8x16_t) -> int8x16_t { + unsafe { vclsq_s8(transmute(a)) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p8)"] +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_u16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -5815,22 +6475,18 @@ pub fn vcreate_p8(a: u64) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcreate_p8(a: u64) -> poly8x8_t { - unsafe { - let ret_val: poly8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vcls_u16(a: uint16x4_t) -> int16x4_t { + unsafe { vcls_s16(transmute(a)) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p16)"] +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_u16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -5840,19 +6496,18 @@ pub fn vcreate_p8(a: u64) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn 
vcreate_p16(a: u64) -> poly16x4_t { - unsafe { transmute(a) } +pub fn vclsq_u16(a: uint16x8_t) -> int16x8_t { + unsafe { vclsq_s16(transmute(a)) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p16)"] +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_u32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -5862,21 +6517,18 @@ pub fn vcreate_p16(a: u64) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcreate_p16(a: u64) -> poly16x4_t { - unsafe { - let ret_val: poly16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vcls_u32(a: uint32x2_t) -> int32x2_t { + unsafe { vcls_s32(transmute(a)) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p64)"] +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_u32)"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr( all(test, 
any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -5886,102 +6538,67 @@ pub fn vcreate_p16(a: u64) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcreate_p64(a: u64) -> poly64x1_t { - unsafe { transmute(a) } -} -#[doc = "Floating-point convert to lower precision narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f16_f32)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -# [cfg_attr (all (test , target_arch = "arm") , assert_instr (vcvt . f16 . f32))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcvtn) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcvt_f16_f32(a: float32x4_t) -> float16x4_t { - unsafe { simd_cast(a) } +pub fn vclsq_u32(a: uint32x4_t) -> int32x4_t { + unsafe { vclsq_s32(transmute(a)) } } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f16_s16)"] +#[doc = "Floating-point compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_f16)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(scvtf) + assert_instr(fcmgt) )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcvt_f16_s16(a: int16x4_t) -> float16x4_t { - unsafe { simd_cast(a) } +pub fn 
vclt_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { + unsafe { simd_lt(a, b) } } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f16_s16)"] +#[doc = "Floating-point compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_f16)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(scvtf) + assert_instr(fcmgt) )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcvtq_f16_s16(a: int16x8_t) -> float16x8_t { - unsafe { simd_cast(a) } +pub fn vcltq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { + unsafe { simd_lt(a, b) } } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f16_u16)"] +#[doc = "Floating-point compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_f32)"] #[inline] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ucvtf) + assert_instr(fcmgt) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcvt_f16_u16(a: uint16x4_t) -> float16x4_t { - unsafe { simd_cast(a) } -} -#[doc = "Fixed-point convert to 
floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f16_u16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ucvtf) + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcvtq_f16_u16(a: uint16x8_t) -> float16x8_t { - unsafe { simd_cast(a) } -} -#[doc = "Floating-point convert to higher precision long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcvtl) + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcvt_f32_f16(a: float16x4_t) -> float32x4_t { - unsafe { simd_cast(a) } +pub fn vclt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { + unsafe { simd_lt(a, b) } } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_s32)"] +#[doc = "Floating-point compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] +#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vcgt.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(scvtf) + assert_instr(fcmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -5991,18 +6608,18 @@ pub fn vcvt_f32_f16(a: float16x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcvt_f32_s32(a: int32x2_t) -> float32x2_t { - unsafe { simd_cast(a) } +pub fn vcltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { + unsafe { simd_lt(a, b) } } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f32_s32)"] +#[doc = "Compare signed less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(scvtf) + assert_instr(cmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -6012,18 +6629,18 @@ pub fn vcvt_f32_s32(a: int32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcvtq_f32_s32(a: int32x4_t) -> float32x4_t { - unsafe { simd_cast(a) } -} -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_u32)"] +pub fn vclt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { + unsafe { simd_lt(a, b) } +} +#[doc = "Compare signed less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ucvtf) + assert_instr(cmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -6033,18 +6650,18 @@ pub fn vcvtq_f32_s32(a: int32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcvt_f32_u32(a: uint32x2_t) -> float32x2_t { - unsafe { simd_cast(a) } +pub fn vcltq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { + unsafe { simd_lt(a, b) } } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f32_u32)"] +#[doc = "Compare signed less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ucvtf) + assert_instr(cmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -6054,566 +6671,482 @@ pub fn vcvt_f32_u32(a: uint32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcvtq_f32_u32(a: uint32x4_t) -> float32x4_t { - unsafe { simd_cast(a) } +pub fn vclt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { + unsafe { simd_lt(a, b) } } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f16_s16)"] +#[doc = "Compare signed less than"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s16)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcvt", N = 1))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(scvtf, N = 1) + assert_instr(cmgt) )] -#[rustc_legacy_const_generics(1)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcvt_n_f16_s16(a: int16x4_t) -> float16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfxs2fp.v4f16.v4i16" - )] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfxs2fp.v4f16.v4i16" - )] - fn _vcvt_n_f16_s16(a: int16x4_t, n: i32) -> float16x4_t; - } - unsafe { _vcvt_n_f16_s16(a, N) } +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vcltq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { + unsafe { simd_lt(a, b) } } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f16_s16)"] +#[doc = "Compare signed less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s32)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcvt", N = 1))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = 
"v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(scvtf, N = 1) + assert_instr(cmgt) )] -#[rustc_legacy_const_generics(1)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcvtq_n_f16_s16(a: int16x8_t) -> float16x8_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfxs2fp.v8f16.v8i16" - )] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfxs2fp.v8f16.v8i16" - )] - fn _vcvtq_n_f16_s16(a: int16x8_t, n: i32) -> float16x8_t; - } - unsafe { _vcvtq_n_f16_s16(a, N) } +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vclt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { + unsafe { simd_lt(a, b) } } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f16_u16)"] +#[doc = "Compare signed less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(cmgt) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vcltq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { + unsafe { 
simd_lt(a, b) } +} +#[doc = "Compare unsigned less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(cmhi) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vclt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { simd_lt(a, b) } +} +#[doc = "Compare unsigned less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(cmhi) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vcltq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { simd_lt(a, b) } +} +#[doc = "Compare unsigned less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(cmhi) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = 
"neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vclt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { simd_lt(a, b) } +} +#[doc = "Compare unsigned less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(cmhi) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vcltq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { simd_lt(a, b) } +} +#[doc = "Compare unsigned less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(cmhi) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vclt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { simd_lt(a, b) } +} +#[doc = "Compare unsigned less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(cmhi) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vcltq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { simd_lt(a, b) } +} +#[doc = "Floating-point compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f16)"] #[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcvt", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclt.f16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ucvtf, N = 1) + assert_instr(fcmlt) )] -#[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcvt_n_f16_u16(a: uint16x4_t) -> float16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfxu2fp.v4f16.v4i16" - )] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfxu2fp.v4f16.v4i16" - )] - fn _vcvt_n_f16_u16(a: uint16x4_t, n: i32) -> float16x4_t; - } - unsafe { _vcvt_n_f16_u16(a, N) } +pub fn vcltz_f16(a: float16x4_t) -> uint16x4_t { + let b: f16x4 = f16x4::new(0.0, 0.0, 0.0, 0.0); + unsafe { simd_lt(a, transmute(b)) } } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f16_u16)"] +#[doc = "Floating-point compare less than"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f16)"] #[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcvt", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclt.f16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ucvtf, N = 1) + assert_instr(fcmlt) )] -#[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcvtq_n_f16_u16(a: uint16x8_t) -> float16x8_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfxu2fp.v8f16.v8i16" - )] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfxu2fp.v8f16.v8i16" - )] - fn _vcvtq_n_f16_u16(a: uint16x8_t, n: i32) -> float16x8_t; - } - unsafe { _vcvtq_n_f16_u16(a, N) } +pub fn vcltzq_f16(a: float16x8_t) -> uint16x8_t { + let b: f16x8 = f16x8::new(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0); + unsafe { simd_lt(a, transmute(b)) } } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_s32)"] +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_s8)"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vcvt, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { - static_assert!(N >= 1 && N <= 32); +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vclz.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(clz) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vclz_s8(a: int8x8_t) -> int8x8_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v8i8")] #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfxs2fp.v2f32.v2i32" + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.ctlz.v8i8" )] - fn _vcvt_n_f32_s32(a: int32x2_t, n: i32) -> float32x2_t; + fn _vclz_s8(a: int8x8_t) -> int8x8_t; } - unsafe { _vcvt_n_f32_s32(a, N) } + unsafe { _vclz_s8(a) } } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_s32)"] +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_s8)"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vcvt, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { - static_assert!(N >= 1 && N <= 32); +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(clz) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vclzq_s8(a: int8x16_t) -> int8x16_t { 
unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v16i8")] #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfxs2fp.v4f32.v4i32" + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.ctlz.v16i8" )] - fn _vcvtq_n_f32_s32(a: int32x4_t, n: i32) -> float32x4_t; + fn _vclzq_s8(a: int8x16_t) -> int8x16_t; } - unsafe { _vcvtq_n_f32_s32(a, N) } + unsafe { _vclzq_s8(a) } } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_s32)"] +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_s16)"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(scvtf, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { - static_assert!(N >= 1 && N <= 32); +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(clz) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vclz_s16(a: int16x4_t) -> int16x4_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f32.v2i32" + link_name = "llvm.ctlz.v4i16" )] - fn _vcvt_n_f32_s32(a: int32x2_t, n: i32) -> float32x2_t; + fn _vclz_s16(a: int16x4_t) -> int16x4_t; } - unsafe { _vcvt_n_f32_s32(a, N) } + unsafe { _vclz_s16(a) } } -#[doc = 
"Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_s32)"] +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_s16)"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(scvtf, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { - static_assert!(N >= 1 && N <= 32); +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(clz) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vclzq_s16(a: int16x8_t) -> int16x8_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfxs2fp.v4f32.v4i32" + link_name = "llvm.ctlz.v8i16" )] - fn _vcvtq_n_f32_s32(a: int32x4_t, n: i32) -> float32x4_t; + fn _vclzq_s16(a: int16x8_t) -> int16x8_t; } - unsafe { _vcvtq_n_f32_s32(a, N) } + unsafe { _vclzq_s16(a) } } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_u32)"] +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_s32)"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vcvt, N = 2))] 
-#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { - static_assert!(N >= 1 && N <= 32); +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(clz) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vclz_s32(a: int32x2_t) -> int32x2_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v2i32")] #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfxu2fp.v2f32.v2i32" + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.ctlz.v2i32" )] - fn _vcvt_n_f32_u32(a: uint32x2_t, n: i32) -> float32x2_t; + fn _vclz_s32(a: int32x2_t) -> int32x2_t; } - unsafe { _vcvt_n_f32_u32(a, N) } + unsafe { _vclz_s32(a) } } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_u32)"] +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_s32)"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vcvt, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { - static_assert!(N >= 1 && N <= 32); +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] 
+#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(clz) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vclzq_s32(a: int32x4_t) -> int32x4_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v4i32")] #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfxu2fp.v4f32.v4i32" + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.ctlz.v4i32" )] - fn _vcvtq_n_f32_u32(a: uint32x4_t, n: i32) -> float32x4_t; + fn _vclzq_s32(a: int32x4_t) -> int32x4_t; } - unsafe { _vcvtq_n_f32_u32(a, N) } + unsafe { _vclzq_s32(a) } } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_u32)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ucvtf, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f32.v2i32" - )] - fn _vcvt_n_f32_u32(a: uint32x2_t, n: i32) -> float32x2_t; - } - unsafe { _vcvt_n_f32_u32(a, N) } -} -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_u32)"] +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] 
-#[cfg_attr(test, assert_instr(ucvtf, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfxu2fp.v4f32.v4i32" - )] - fn _vcvtq_n_f32_u32(a: uint32x4_t, n: i32) -> float32x4_t; - } - unsafe { _vcvtq_n_f32_u32(a, N) } -} -#[doc = "Floating-point convert to signed fixed-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s16_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcvt", N = 1))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcvtzs, N = 1) + assert_instr(clz) )] -#[rustc_legacy_const_generics(1)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcvt_n_s16_f16(a: float16x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfp2fxs.v4i16.v4f16" - )] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfp2fxs.v4i16.v4f16" - )] - fn _vcvt_n_s16_f16(a: float16x4_t, n: i32) -> int16x4_t; - } - unsafe { _vcvt_n_s16_f16(a, N) } -} -#[doc = "Floating-point convert to signed fixed-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s16_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcvt", N 
= 1))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcvtzs, N = 1) + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") )] -#[rustc_legacy_const_generics(1)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcvtq_n_s16_f16(a: float16x8_t) -> int16x8_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfp2fxs.v8i16.v8f16" - )] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfp2fxs.v8i16.v8f16" - )] - fn _vcvtq_n_s16_f16(a: float16x8_t, n: i32) -> int16x8_t; - } - unsafe { _vcvtq_n_s16_f16(a, N) } -} -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s32_f32)"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vcvt, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfp2fxs.v2i32.v2f32" - )] - fn _vcvt_n_s32_f32(a: float32x2_t, n: i32) -> int32x2_t; - } - unsafe { _vcvt_n_s32_f32(a, N) } -} -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s32_f32)"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vcvt, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vcvtq_n_s32_f32(a: float32x4_t) 
-> int32x4_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfp2fxs.v4i32.v4f32" - )] - fn _vcvtq_n_s32_f32(a: float32x4_t, n: i32) -> int32x4_t; - } - unsafe { _vcvtq_n_s32_f32(a, N) } -} -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s32_f32)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(fcvtzs, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i32.v2f32" - )] - fn _vcvt_n_s32_f32(a: float32x2_t, n: i32) -> int32x2_t; - } - unsafe { _vcvt_n_s32_f32(a, N) } +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vclz_u16(a: uint16x4_t) -> uint16x4_t { + unsafe { transmute(vclz_s16(transmute(a))) } } -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s32_f32)"] +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(fcvtzs, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vcvtq_n_s32_f32(a: float32x4_t) -> int32x4_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = 
"aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfp2fxs.v4i32.v4f32" - )] - fn _vcvtq_n_s32_f32(a: float32x4_t, n: i32) -> int32x4_t; - } - unsafe { _vcvtq_n_s32_f32(a, N) } -} -#[doc = "Fixed-point convert to unsigned fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u16_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcvt", N = 1))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcvtzu, N = 1) + assert_instr(clz) )] -#[rustc_legacy_const_generics(1)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcvt_n_u16_f16(a: float16x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfp2fxu.v4i16.v4f16" - )] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfp2fxu.v4i16.v4f16" - )] - fn _vcvt_n_u16_f16(a: float16x4_t, n: i32) -> uint16x4_t; - } - unsafe { _vcvt_n_u16_f16(a, N) } -} -#[doc = "Fixed-point convert to unsigned fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u16_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcvt", N = 1))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcvtzu, N = 1) + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") )] -#[rustc_legacy_const_generics(1)] 
-#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcvtq_n_u16_f16(a: float16x8_t) -> uint16x8_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfp2fxu.v8i16.v8f16" - )] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfp2fxu.v8i16.v8f16" - )] - fn _vcvtq_n_u16_f16(a: float16x8_t, n: i32) -> uint16x8_t; - } - unsafe { _vcvtq_n_u16_f16(a, N) } -} -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u32_f32)"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vcvt, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfp2fxu.v2i32.v2f32" - )] - fn _vcvt_n_u32_f32(a: float32x2_t, n: i32) -> uint32x2_t; - } - unsafe { _vcvt_n_u32_f32(a, N) } -} -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u32_f32)"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vcvt, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfp2fxu.v4i32.v4f32" - )] - fn _vcvtq_n_u32_f32(a: float32x4_t, n: i32) -> 
uint32x4_t; - } - unsafe { _vcvtq_n_u32_f32(a, N) } -} -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u32_f32)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(fcvtzu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i32.v2f32" - )] - fn _vcvt_n_u32_f32(a: float32x2_t, n: i32) -> uint32x2_t; +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vclz_u16(a: uint16x4_t) -> uint16x4_t { + let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; + unsafe { + let ret_val: uint16x4_t = transmute(vclz_s16(transmute(a))); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - unsafe { _vcvt_n_u32_f32(a, N) } } -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u32_f32)"] +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(fcvtzu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = 
"llvm.aarch64.neon.vcvtfp2fxu.v4i32.v4f32" - )] - fn _vcvtq_n_u32_f32(a: float32x4_t, n: i32) -> uint32x4_t; - } - unsafe { _vcvtq_n_u32_f32(a, N) } -} -#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_s16_f16)"] -#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcvtzs) + assert_instr(clz) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcvt_s16_f16(a: float16x4_t) -> int16x4_t { - unsafe { simd_cast(a) } -} -#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s16_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcvtzs) + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcvtq_s16_f16(a: float16x8_t) -> int16x8_t { - unsafe { simd_cast(a) } +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vclzq_u16(a: uint16x8_t) -> uint16x8_t { + unsafe { transmute(vclzq_s16(transmute(a))) } } -#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_s32_f32)"] +#[doc = "Count 
leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcvtzs) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -6623,26 +7156,23 @@ pub fn vcvtq_s16_f16(a: float16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcvt_s32_f32(a: float32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.fptosi.sat.v2i32.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fptosi.sat.v2i32.v2f32" - )] - fn _vcvt_s32_f32(a: float32x2_t) -> int32x2_t; +pub fn vclzq_u16(a: uint16x8_t) -> uint16x8_t { + let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; + unsafe { + let ret_val: uint16x8_t = transmute(vclzq_s16(transmute(a))); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - unsafe { _vcvt_s32_f32(a) } } -#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s32_f32)"] +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] 
#[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcvtzs) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -6652,54 +7182,45 @@ pub fn vcvt_s32_f32(a: float32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcvtq_s32_f32(a: float32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.fptosi.sat.v4i32.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fptosi.sat.v4i32.v4f32" - )] - fn _vcvtq_s32_f32(a: float32x4_t) -> int32x4_t; - } - unsafe { _vcvtq_s32_f32(a) } +pub fn vclz_u32(a: uint32x2_t) -> uint32x2_t { + unsafe { transmute(vclz_s32(transmute(a))) } } -#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_u16_f16)"] +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u32)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcvtzu) + assert_instr(clz) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcvt_u16_f16(a: float16x4_t) -> uint16x4_t { - unsafe { simd_cast(a) } -} -#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u16_f16)"] -#[inline] 
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcvtzu) + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vcvtq_u16_f16(a: float16x8_t) -> uint16x8_t { - unsafe { simd_cast(a) } +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vclz_u32(a: uint32x2_t) -> uint32x2_t { + let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; + unsafe { + let ret_val: uint32x2_t = transmute(vclz_s32(transmute(a))); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } -#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_u32_f32)"] +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcvtzu) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -6709,26 +7230,19 @@ pub fn vcvtq_u16_f16(a: float16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcvt_u32_f32(a: float32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.fptoui.sat.v2i32.v2f32")] - #[cfg_attr( - any(target_arch = 
"aarch64", target_arch = "arm64ec"), - link_name = "llvm.fptoui.sat.v2i32.v2f32" - )] - fn _vcvt_u32_f32(a: float32x2_t) -> uint32x2_t; - } - unsafe { _vcvt_u32_f32(a) } +pub fn vclzq_u32(a: uint32x4_t) -> uint32x4_t { + unsafe { transmute(vclzq_s32(transmute(a))) } } -#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u32_f32)"] +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcvtzu) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -6738,292 +7252,284 @@ pub fn vcvt_u32_f32(a: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vcvtq_u32_f32(a: float32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.fptoui.sat.v4i32.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fptoui.sat.v4i32.v4f32" - )] - fn _vcvtq_u32_f32(a: float32x4_t) -> uint32x4_t; +pub fn vclzq_u32(a: uint32x4_t) -> uint32x4_t { + let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; + unsafe { + let ret_val: uint32x4_t = transmute(vclzq_s32(transmute(a))); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - unsafe { _vcvtq_u32_f32(a) } } -#[doc = "Dot product arithmetic (indexed)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_lane_s32)"] 
+#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u8)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot, LANE = 0))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sdot, LANE = 0) + assert_instr(clz) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_dotprod", issue = "117224") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdot_lane_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - unsafe { - let c: int32x2_t = transmute(c); - let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); - vdot_s32(a, b, transmute(c)) - } +pub fn vclz_u8(a: uint8x8_t) -> uint8x8_t { + unsafe { transmute(vclz_s8(transmute(a))) } } -#[doc = "Dot product arithmetic (indexed)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_lane_s32)"] +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u8)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot, LANE = 0))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, 
target_arch = "arm"), assert_instr("vclz.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sdot, LANE = 0) + assert_instr(clz) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_dotprod", issue = "117224") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdotq_lane_s32(a: int32x4_t, b: int8x16_t, c: int8x8_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 1); +pub fn vclz_u8(a: uint8x8_t) -> uint8x8_t { + let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { - let c: int32x2_t = transmute(c); - let c: int32x4_t = - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vdotq_s32(a, b, transmute(c)) + let ret_val: uint8x8_t = transmute(vclz_s8(transmute(a))); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } -#[doc = "Dot product arithmetic (indexed)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_lane_u32)"] +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u8)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot, LANE = 0))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(udot, LANE = 0) + assert_instr(clz) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_dotprod", issue = "117224") + 
stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdot_lane_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t { - static_assert_uimm_bits!(LANE, 1); - unsafe { - let c: uint32x2_t = transmute(c); - let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); - vdot_u32(a, b, transmute(c)) - } +pub fn vclzq_u8(a: uint8x16_t) -> uint8x16_t { + unsafe { transmute(vclzq_s8(transmute(a))) } } -#[doc = "Dot product arithmetic (indexed)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_lane_u32)"] +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u8)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot, LANE = 0))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(udot, LANE = 0) + assert_instr(clz) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_dotprod", issue = "117224") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdotq_lane_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x8_t) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 1); +pub fn vclzq_u8(a: uint8x16_t) -> uint8x16_t { + let a: uint8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { - let c: uint32x2_t = transmute(c); - let 
c: uint32x4_t = - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vdotq_u32(a, b, transmute(c)) + let ret_val: uint8x16_t = transmute(vclzq_s8(transmute(a))); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } } -#[doc = "Dot product arithmetic (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_s32)"] +#[doc = "Population count per byte."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcnt_s8)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sdot) + assert_instr(cnt) )] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_dotprod", issue = "117224") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t { +pub fn vcnt_s8(a: int8x8_t) -> int8x8_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sdot.v2i32.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sdot.v2i32.v8i8" + link_name = "llvm.ctpop.v8i8" )] - fn _vdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.ctpop.v8i8")] + fn _vcnt_s8(a: int8x8_t) -> int8x8_t; } - unsafe { _vdot_s32(a, b, c) } + unsafe { _vcnt_s8(a) } } -#[doc = "Dot product arithmetic (vector)"] -#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_s32)"] +#[doc = "Population count per byte."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcntq_s8)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sdot) + assert_instr(cnt) )] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_dotprod", issue = "117224") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t { +pub fn vcntq_s8(a: int8x16_t) -> int8x16_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sdot.v4i32.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sdot.v4i32.v16i8" + link_name = "llvm.ctpop.v16i8" )] - fn _vdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.ctpop.v16i8")] + fn _vcntq_s8(a: int8x16_t) -> int8x16_t; } - unsafe { _vdotq_s32(a, b, c) } + unsafe { _vcntq_s8(a) } } -#[doc = "Dot product arithmetic (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_u32)"] +#[doc = "Population count per byte."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcnt_u8)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = 
"v8"))] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(udot) + assert_instr(cnt) )] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_dotprod", issue = "117224") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdot_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.udot.v2i32.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.udot.v2i32.v8i8" - )] - fn _vdot_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t; - } - unsafe { _vdot_u32(a, b, c) } +pub fn vcnt_u8(a: uint8x8_t) -> uint8x8_t { + unsafe { transmute(vcnt_s8(transmute(a))) } } -#[doc = "Dot product arithmetic (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_u32)"] +#[doc = "Population count per byte."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcnt_u8)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - 
assert_instr(udot) + assert_instr(cnt) )] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_dotprod", issue = "117224") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdotq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.udot.v4i32.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.udot.v4i32.v16i8" - )] - fn _vdotq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t; +pub fn vcnt_u8(a: uint8x8_t) -> uint8x8_t { + let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; + unsafe { + let ret_val: uint8x8_t = transmute(vcnt_s8(transmute(a))); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - unsafe { _vdotq_u32(a, b, c) } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f16)"] +#[doc = "Population count per byte."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcntq_u8)"] #[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(cnt) )] -#[rustc_legacy_const_generics(1)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vdup_lane_f16(a: float16x4_t) -> float16x4_t { - static_assert_uimm_bits!(N, 2); - unsafe { simd_shuffle!(a, a, [N as u32, N as 
u32, N as u32, N as u32]) } +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vcntq_u8(a: uint8x16_t) -> uint8x16_t { + unsafe { transmute(vcntq_s8(transmute(a))) } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f16)"] +#[doc = "Population count per byte."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcntq_u8)"] #[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(cnt) )] -#[rustc_legacy_const_generics(1)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vdupq_lane_f16(a: float16x4_t) -> float16x8_t { - static_assert_uimm_bits!(N, 2); +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vcntq_u8(a: uint8x16_t) -> uint8x16_t { + let a: uint8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let ret_val: uint8x16_t = transmute(vcntq_s8(transmute(a))); simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f32)"] +#[doc = "Population count per byte."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcnt_p8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 1) + assert_instr(cnt) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7032,21 +7538,20 @@ pub fn vdupq_lane_f16(a: float16x4_t) -> float16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdup_lane_f32(a: float32x2_t) -> float32x2_t { - static_assert_uimm_bits!(N, 1); - unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) } +pub fn vcnt_p8(a: poly8x8_t) -> poly8x8_t { + unsafe { transmute(vcnt_s8(transmute(a))) } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s32)"] +#[doc = "Population count per byte."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcnt_p8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 1) + assert_instr(cnt) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), 
stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7055,21 +7560,24 @@ pub fn vdup_lane_f32(a: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdup_lane_s32(a: int32x2_t) -> int32x2_t { - static_assert_uimm_bits!(N, 1); - unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) } +pub fn vcnt_p8(a: poly8x8_t) -> poly8x8_t { + let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; + unsafe { + let ret_val: poly8x8_t = transmute(vcnt_s8(transmute(a))); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u32)"] +#[doc = "Population count per byte."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcntq_p8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 1) + assert_instr(cnt) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7078,21 +7586,20 @@ pub fn vdup_lane_s32(a: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdup_lane_u32(a: uint32x2_t) -> uint32x2_t { - static_assert_uimm_bits!(N, 1); - unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) } +pub fn vcntq_p8(a: poly8x16_t) -> poly8x16_t { + unsafe { transmute(vcntq_s8(transmute(a))) } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f32)"] +#[doc = "Population count per byte."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcntq_p8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 1) + assert_instr(cnt) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7101,21 +7608,34 @@ pub fn vdup_lane_u32(a: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdupq_lane_f32(a: float32x2_t) -> float32x4_t { - static_assert_uimm_bits!(N, 1); - unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } +pub fn vcntq_p8(a: poly8x16_t) -> poly8x16_t { + let a: poly8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; + unsafe { + let ret_val: poly8x16_t = transmute(vcntq_s8(transmute(a))); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s32)"] +#[doc = "Join two smaller vectors into a single larger vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] 
+#[cfg_attr(test, assert_instr(nop))] +pub fn vcombine_f16(a: float16x4_t, b: float16x4_t) -> float16x8_t { + unsafe { simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]) } +} +#[doc = "Join two smaller vectors into a single larger vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 1) -)] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7124,21 +7644,15 @@ pub fn vdupq_lane_f32(a: float32x2_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdupq_lane_s32(a: int32x2_t) -> int32x4_t { - static_assert_uimm_bits!(N, 1); - unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } +pub fn vcombine_f32(a: float32x2_t, b: float32x2_t) -> float32x4_t { + unsafe { simd_shuffle!(a, b, [0, 1, 2, 3]) } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u32)"] +#[doc = "Join two smaller vectors into a single larger vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 1) -)] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7147,21 +7661,15 @@ pub fn vdupq_lane_s32(a: int32x2_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdupq_lane_u32(a: uint32x2_t) -> uint32x4_t { - static_assert_uimm_bits!(N, 1); - unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } +pub fn vcombine_s8(a: int8x8_t, b: int8x8_t) -> int8x16_t { + unsafe { simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p16)"] +#[doc = "Join two smaller vectors into a single larger vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) -)] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7170,21 +7678,15 @@ pub fn vdupq_lane_u32(a: uint32x2_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdup_lane_p16(a: poly16x4_t) -> poly16x4_t { - static_assert_uimm_bits!(N, 2); - unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } +pub fn vcombine_s16(a: int16x4_t, b: int16x4_t) -> int16x8_t { + unsafe { simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]) } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s16)"] +#[doc = "Join two smaller vectors into a single larger vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) -)] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7193,21 +7695,15 @@ pub fn vdup_lane_p16(a: poly16x4_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdup_lane_s16(a: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(N, 2); - unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } +pub fn vcombine_s32(a: int32x2_t, b: int32x2_t) -> int32x4_t { + unsafe { simd_shuffle!(a, b, [0, 1, 2, 3]) } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u16)"] +#[doc = "Join two smaller vectors into a single larger vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) -)] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), 
stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7216,21 +7712,15 @@ pub fn vdup_lane_s16(a: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdup_lane_u16(a: uint16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(N, 2); - unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } +pub fn vcombine_s64(a: int64x1_t, b: int64x1_t) -> int64x2_t { + unsafe { simd_shuffle!(a, b, [0, 1]) } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p16)"] +#[doc = "Join two smaller vectors into a single larger vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) -)] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7239,27 +7729,15 @@ pub fn vdup_lane_u16(a: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdupq_lane_p16(a: poly16x4_t) -> poly16x8_t { - static_assert_uimm_bits!(N, 2); - unsafe { - simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ) - } +pub fn vcombine_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x16_t { + unsafe { simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s16)"] +#[doc = "Join two smaller vectors into a single larger vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) -)] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7268,27 +7746,15 @@ pub fn vdupq_lane_p16(a: poly16x4_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdupq_lane_s16(a: int16x4_t) -> int16x8_t { - static_assert_uimm_bits!(N, 2); - unsafe { - simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ) - } +pub fn vcombine_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x8_t { + unsafe { simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]) } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u16)"] +#[doc = "Join two smaller vectors into a single larger vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) -)] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(all(test, target_arch 
= "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7297,27 +7763,15 @@ pub fn vdupq_lane_s16(a: int16x4_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdupq_lane_u16(a: uint16x4_t) -> uint16x8_t { - static_assert_uimm_bits!(N, 2); - unsafe { - simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ) - } +pub fn vcombine_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x4_t { + unsafe { simd_shuffle!(a, b, [0, 1, 2, 3]) } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p8)"] +#[doc = "Join two smaller vectors into a single larger vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) -)] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7326,27 +7780,15 @@ pub fn vdupq_lane_u16(a: uint16x4_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdup_lane_p8(a: poly8x8_t) -> poly8x8_t { - static_assert_uimm_bits!(N, 3); - unsafe { - simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ) - } +pub fn vcombine_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x2_t { + unsafe { simd_shuffle!(a, b, [0, 1]) } } -#[doc = "Set all vector lanes to the same 
value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s8)"] +#[doc = "Join two smaller vectors into a single larger vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_p8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) -)] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7355,27 +7797,15 @@ pub fn vdup_lane_p8(a: poly8x8_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdup_lane_s8(a: int8x8_t) -> int8x8_t { - static_assert_uimm_bits!(N, 3); - unsafe { - simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ) - } +pub fn vcombine_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x16_t { + unsafe { simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u8)"] +#[doc = "Join two smaller vectors into a single larger vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_p16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) -)] 
-#[rustc_legacy_const_generics(1)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7384,27 +7814,15 @@ pub fn vdup_lane_s8(a: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdup_lane_u8(a: uint8x8_t) -> uint8x8_t { - static_assert_uimm_bits!(N, 3); - unsafe { - simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ) - } +pub fn vcombine_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x8_t { + unsafe { simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]) } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p8)"] +#[doc = "Join two smaller vectors into a single larger vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_p64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) -)] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7413,62 +7831,53 @@ pub fn vdup_lane_u8(a: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdupq_lane_p8(a: poly8x8_t) -> poly8x16_t { - static_assert_uimm_bits!(N, 3); - unsafe { - simd_shuffle!( - a, - a, - [ - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 - ] - ) - } 
+pub fn vcombine_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x2_t { + unsafe { simd_shuffle!(a, b, [0, 1]) } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s8)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f16)"] #[inline] -#[target_feature(enable = "neon")] +#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + assert_instr(nop) )] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vcreate_f16(a: u64) -> float16x4_t { + unsafe { transmute(a) } +} +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f16)"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) )] -pub fn vdupq_lane_s8(a: int8x8_t) -> int8x16_t { - static_assert_uimm_bits!(N, 3); +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vcreate_f16(a: u64) -> float16x4_t { unsafe { - simd_shuffle!( - a, - a, - [ - N 
as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 - ] - ) + let ret_val: float16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u8)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7477,30 +7886,20 @@ pub fn vdupq_lane_s8(a: int8x8_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdupq_lane_u8(a: uint8x8_t) -> uint8x16_t { - static_assert_uimm_bits!(N, 3); - unsafe { - simd_shuffle!( - a, - a, - [ - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 - ] - ) - } +pub fn vcreate_f32(a: u64) -> float32x2_t { + unsafe { transmute(a) } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s64)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, N = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, N = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7509,21 +7908,23 @@ pub fn vdupq_lane_u8(a: uint8x8_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdup_lane_s64(a: int64x1_t) -> int64x1_t { - static_assert!(N == 0); - a +pub fn vcreate_f32(a: u64) -> float32x2_t { + unsafe { + let ret_val: float32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u64)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, N = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, N = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7532,59 +7933,20 @@ pub fn vdup_lane_s64(a: int64x1_t) -> int64x1_t { target_arch = "arm", 
unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdup_lane_u64(a: uint64x1_t) -> uint64x1_t { - static_assert!(N == 0); - a -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) -)] -#[rustc_legacy_const_generics(1)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vdup_laneq_f16(a: float16x8_t) -> float16x4_t { - static_assert_uimm_bits!(N, 3); - unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) -)] -#[rustc_legacy_const_generics(1)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vdupq_laneq_f16(a: float16x8_t) -> float16x8_t { - static_assert_uimm_bits!(N, 3); - unsafe { - simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ) - } +pub fn vcreate_s8(a: u64) -> int8x8_t { + unsafe { transmute(a) } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f32)"] +#[doc = "Insert vector element from another vector element"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7593,21 +7955,23 @@ pub fn vdupq_laneq_f16(a: float16x8_t) -> float16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdup_laneq_f32(a: float32x4_t) -> float32x2_t { - static_assert_uimm_bits!(N, 2); - unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) } +pub fn vcreate_s8(a: u64) -> int8x8_t { + unsafe { + let ret_val: int8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s32)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = 
"1.59.0") @@ -7616,21 +7980,20 @@ pub fn vdup_laneq_f32(a: float32x4_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdup_laneq_s32(a: int32x4_t) -> int32x2_t { - static_assert_uimm_bits!(N, 2); - unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) } +pub fn vcreate_s16(a: u64) -> int16x4_t { + unsafe { transmute(a) } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u32)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7639,21 +8002,23 @@ pub fn vdup_laneq_s32(a: int32x4_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdup_laneq_u32(a: uint32x4_t) -> uint32x2_t { - static_assert_uimm_bits!(N, 2); - unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) } +pub fn vcreate_s16(a: u64) -> int16x4_t { + unsafe { + let ret_val: int16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f32)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7662,21 +8027,20 @@ pub fn vdup_laneq_u32(a: uint32x4_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdupq_laneq_f32(a: float32x4_t) -> float32x4_t { - static_assert_uimm_bits!(N, 2); - unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } +pub fn vcreate_s32(a: u64) -> int32x2_t { + unsafe { transmute(a) } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s32)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7685,21 +8049,22 @@ pub fn vdupq_laneq_f32(a: float32x4_t) -> 
float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdupq_laneq_s32(a: int32x4_t) -> int32x4_t { - static_assert_uimm_bits!(N, 2); - unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } +pub fn vcreate_s32(a: u64) -> int32x2_t { + unsafe { + let ret_val: int32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u32)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7708,21 +8073,20 @@ pub fn vdupq_laneq_s32(a: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdupq_laneq_u32(a: uint32x4_t) -> uint32x4_t { - static_assert_uimm_bits!(N, 2); - unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } +pub fn vcreate_s64(a: u64) -> int64x1_t { + unsafe { transmute(a) } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p16)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7731,21 +8095,20 @@ pub fn vdupq_laneq_u32(a: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdup_laneq_p16(a: poly16x8_t) -> poly16x4_t { - static_assert_uimm_bits!(N, 3); - unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } +pub fn vcreate_u8(a: u64) -> uint8x8_t { + unsafe { transmute(a) } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s16)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7754,21 +8117,23 @@ pub fn vdup_laneq_p16(a: poly16x8_t) -> poly16x4_t { 
target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdup_laneq_s16(a: int16x8_t) -> int16x4_t { - static_assert_uimm_bits!(N, 3); - unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } +pub fn vcreate_u8(a: u64) -> uint8x8_t { + unsafe { + let ret_val: uint8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u16)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7777,21 +8142,20 @@ pub fn vdup_laneq_s16(a: int16x8_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdup_laneq_u16(a: uint16x8_t) -> uint16x4_t { - static_assert_uimm_bits!(N, 3); - unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } +pub fn vcreate_u16(a: u64) -> uint16x4_t { + unsafe { transmute(a) } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p16)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7800,27 +8164,23 @@ pub fn vdup_laneq_u16(a: uint16x8_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdupq_laneq_p16(a: poly16x8_t) -> poly16x8_t { - static_assert_uimm_bits!(N, 3); +pub fn vcreate_u16(a: u64) -> uint16x4_t { unsafe { - simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ) + let ret_val: uint16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s16)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature 
= "neon_intrinsics", since = "1.59.0") @@ -7829,27 +8189,20 @@ pub fn vdupq_laneq_p16(a: poly16x8_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdupq_laneq_s16(a: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(N, 3); - unsafe { - simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ) - } +pub fn vcreate_u32(a: u64) -> uint32x2_t { + unsafe { transmute(a) } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u16)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7858,27 +8211,22 @@ pub fn vdupq_laneq_s16(a: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdupq_laneq_u16(a: uint16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(N, 3); +pub fn vcreate_u32(a: u64) -> uint32x2_t { unsafe { - simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ) + let ret_val: uint32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p8)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 8) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7887,27 +8235,20 @@ pub fn vdupq_laneq_u16(a: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdup_laneq_p8(a: poly8x16_t) -> poly8x8_t { - static_assert_uimm_bits!(N, 4); - unsafe { - simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ) - } +pub fn vcreate_u64(a: u64) -> uint64x1_t { + unsafe { transmute(a) } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s8)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 8) + assert_instr(nop) )] 
-#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7916,27 +8257,20 @@ pub fn vdup_laneq_p8(a: poly8x16_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdup_laneq_s8(a: int8x16_t) -> int8x8_t { - static_assert_uimm_bits!(N, 4); - unsafe { - simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ) - } +pub fn vcreate_p8(a: u64) -> poly8x8_t { + unsafe { transmute(a) } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u8)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 8) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7945,27 +8279,23 @@ pub fn vdup_laneq_s8(a: int8x16_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdup_laneq_u8(a: uint8x16_t) -> uint8x8_t { - static_assert_uimm_bits!(N, 4); +pub fn vcreate_p8(a: u64) -> poly8x8_t { unsafe { - simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ) + let ret_val: poly8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } -#[doc = "Set all 
vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p8)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 8) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7974,30 +8304,20 @@ pub fn vdup_laneq_u8(a: uint8x16_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdupq_laneq_p8(a: poly8x16_t) -> poly8x16_t { - static_assert_uimm_bits!(N, 4); - unsafe { - simd_shuffle!( - a, - a, - [ - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 - ] - ) - } +pub fn vcreate_p16(a: u64) -> poly16x4_t { + unsafe { transmute(a) } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s8)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] +#[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 8) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8006,30 +8326,22 @@ pub fn vdupq_laneq_p8(a: poly8x16_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdupq_laneq_s8(a: int8x16_t) -> int8x16_t { - static_assert_uimm_bits!(N, 4); +pub fn vcreate_p16(a: u64) -> poly16x4_t { unsafe { - simd_shuffle!( - a, - a, - [ - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 - ] - ) + let ret_val: poly16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u8)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p64)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 8) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8038,150 +8350,103 @@ pub fn vdupq_laneq_s8(a: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdupq_laneq_u8(a: uint8x16_t) -> uint8x16_t { - static_assert_uimm_bits!(N, 4); - unsafe { - simd_shuffle!( - a, - a, - [ - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 - ] - ) - } +pub fn vcreate_p64(a: u64) -> poly64x1_t { + unsafe { transmute(a) } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s64)"] +#[doc = "Floating-point convert to lower precision narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f16_f32)"] #[inline] -#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] +# [cfg_attr (all (test , target_arch = "arm") , assert_instr (vcvt . f16 . 
f32))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, N = 1) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + assert_instr(fcvtn) )] -pub fn vdup_laneq_s64(a: int64x2_t) -> int64x1_t { - static_assert_uimm_bits!(N, 1); - unsafe { transmute::(simd_extract!(a, N as u32)) } +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vcvt_f16_f32(a: float32x4_t) -> float16x4_t { + unsafe { simd_cast(a) } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u64)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f16_s16)"] #[inline] -#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, N = 1) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + assert_instr(scvtf) )] -pub fn vdup_laneq_u64(a: uint64x2_t) -> uint64x1_t { - static_assert_uimm_bits!(N, 1); - unsafe { transmute::(simd_extract!(a, N as u32)) } +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vcvt_f16_s16(a: int16x4_t) -> float16x4_t { + unsafe { simd_cast(a) } } -#[doc = 
"Create a new vector with all lanes set to a value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_f16)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f16_s16)"] #[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(scvtf) )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vdup_n_f16(a: f16) -> float16x4_t { - float16x4_t::splat(a) +pub fn vcvtq_f16_s16(a: int16x8_t) -> float16x8_t { + unsafe { simd_cast(a) } } -#[doc = "Create a new vector with all lanes set to a value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_f16)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f16_u16)"] #[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(ucvtf) )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vdupq_n_f16(a: f16) -> float16x8_t { - float16x8_t::splat(a) +pub fn vcvt_f16_u16(a: uint16x4_t) -> float16x4_t { + unsafe { simd_cast(a) } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s64)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f16_u16)"] #[inline] -#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 0) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + assert_instr(ucvtf) )] -pub fn vdupq_lane_s64(a: int64x1_t) -> int64x2_t { - static_assert!(N == 0); - unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) } +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vcvtq_f16_u16(a: uint16x8_t) -> float16x8_t { + unsafe { simd_cast(a) } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u64)"] +#[doc = "Floating-point convert to higher precision long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_f16)"] #[inline] -#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 0) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - 
stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + assert_instr(fcvtl) )] -pub fn vdupq_lane_u64(a: uint64x1_t) -> uint64x2_t { - static_assert!(N == 0); - unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) } +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vcvt_f32_f16(a: float16x4_t) -> float32x4_t { + unsafe { simd_cast(a) } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s64)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 1) + assert_instr(scvtf) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8190,21 +8455,19 @@ pub fn vdupq_lane_u64(a: uint64x1_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdupq_laneq_s64(a: int64x2_t) -> int64x2_t { - static_assert_uimm_bits!(N, 1); - unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) } +pub fn vcvt_f32_s32(a: int32x2_t) -> float32x2_t { + unsafe { simd_cast(a) } } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u64)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f32_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 1) + assert_instr(scvtf) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8213,19 +8476,18 @@ pub fn vdupq_laneq_s64(a: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdupq_laneq_u64(a: uint64x2_t) -> uint64x2_t { - static_assert_uimm_bits!(N, 1); - unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) } +pub fn vcvtq_f32_s32(a: int32x4_t) -> float32x4_t { + unsafe { simd_cast(a) } } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s8)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(ucvtf) )] #[cfg_attr( not(target_arch = "arm"), @@ -8235,18 +8497,18 @@ pub fn vdupq_laneq_u64(a: uint64x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn veor_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe { simd_xor(a, b) } +pub fn vcvt_f32_u32(a: 
uint32x2_t) -> float32x2_t { + unsafe { simd_cast(a) } } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s8)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f32_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(ucvtf) )] #[cfg_attr( not(target_arch = "arm"), @@ -8256,270 +8518,566 @@ pub fn veor_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn veorq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe { simd_xor(a, b) } +pub fn vcvtq_f32_u32(a: uint32x4_t) -> float32x4_t { + unsafe { simd_cast(a) } } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s16)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f16_s16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcvt", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - 
target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + assert_instr(scvtf, N = 1) )] -pub fn veor_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe { simd_xor(a, b) } +#[rustc_legacy_const_generics(1)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vcvt_n_f16_s16(a: int16x4_t) -> float16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfxs2fp.v4f16.v4i16" + )] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfxs2fp.v4f16.v4i16" + )] + fn _vcvt_n_f16_s16(a: int16x4_t, n: i32) -> float16x4_t; + } + unsafe { _vcvt_n_f16_s16(a, N) } } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s16)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f16_s16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcvt", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + assert_instr(scvtf, N = 1) )] -pub fn veorq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe { simd_xor(a, b) } +#[rustc_legacy_const_generics(1)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = 
"136306")] +pub fn vcvtq_n_f16_s16(a: int16x8_t) -> float16x8_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfxs2fp.v8f16.v8i16" + )] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfxs2fp.v8f16.v8i16" + )] + fn _vcvtq_n_f16_s16(a: int16x8_t, n: i32) -> float16x8_t; + } + unsafe { _vcvtq_n_f16_s16(a, N) } } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s32)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f16_u16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcvt", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + assert_instr(ucvtf, N = 1) )] -pub fn veor_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe { simd_xor(a, b) } +#[rustc_legacy_const_generics(1)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vcvt_n_f16_u16(a: uint16x4_t) -> float16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfxu2fp.v4f16.v4i16" + )] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = 
"llvm.aarch64.neon.vcvtfxu2fp.v4f16.v4i16" + )] + fn _vcvt_n_f16_u16(a: uint16x4_t, n: i32) -> float16x4_t; + } + unsafe { _vcvt_n_f16_u16(a, N) } } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s32)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f16_u16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcvt", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + assert_instr(ucvtf, N = 1) )] -pub fn veorq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe { simd_xor(a, b) } +#[rustc_legacy_const_generics(1)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vcvtq_n_f16_u16(a: uint16x8_t) -> float16x8_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfxu2fp.v8f16.v8i16" + )] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfxu2fp.v8f16.v8i16" + )] + fn _vcvtq_n_f16_u16(a: uint16x8_t, n: i32) -> float16x8_t; + } + unsafe { _vcvtq_n_f16_u16(a, N) } } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s64)"] +#[doc = "Fixed-point 
convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_s32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn veor_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - unsafe { simd_xor(a, b) } +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfxs2fp.v2f32.v2i32" + )] + fn _vcvt_n_f32_s32(a: int32x2_t, n: i32) -> float32x2_t; + } + unsafe { _vcvt_n_f32_s32(a, N) } } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s64)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_s32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - 
target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn veorq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - unsafe { simd_xor(a, b) } +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfxs2fp.v4f32.v4i32" + )] + fn _vcvtq_n_f32_s32(a: int32x4_t, n: i32) -> float32x4_t; + } + unsafe { _vcvtq_n_f32_s32(a, N) } } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u8)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_s32)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn veor_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe { simd_xor(a, b) } +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(scvtf, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + 
link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f32.v2i32" + )] + fn _vcvt_n_f32_s32(a: int32x2_t, n: i32) -> float32x2_t; + } + unsafe { _vcvt_n_f32_s32(a, N) } } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u8)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_s32)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn veorq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe { simd_xor(a, b) } +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(scvtf, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfxs2fp.v4f32.v4i32" + )] + fn _vcvtq_n_f32_s32(a: int32x4_t, n: i32) -> float32x4_t; + } + unsafe { _vcvtq_n_f32_s32(a, N) } } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u16)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_u32)"] #[inline] -#[target_feature(enable = "neon")] 
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn veor_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe { simd_xor(a, b) } +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfxu2fp.v2f32.v2i32" + )] + fn _vcvt_n_f32_u32(a: uint32x2_t, n: i32) -> float32x2_t; + } + unsafe { _vcvt_n_f32_u32(a, N) } } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u16)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_u32)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfxu2fp.v4f32.v4i32" + )] + fn _vcvtq_n_f32_u32(a: uint32x4_t, n: i32) -> float32x4_t; + } + unsafe { _vcvtq_n_f32_u32(a, N) } +} +#[doc = "Fixed-point convert to 
floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_u32)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ucvtf, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f32.v2i32" + )] + fn _vcvt_n_f32_u32(a: uint32x2_t, n: i32) -> float32x2_t; + } + unsafe { _vcvt_n_f32_u32(a, N) } +} +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_u32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ucvtf, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfxu2fp.v4f32.v4i32" + )] + fn _vcvtq_n_f32_u32(a: uint32x4_t, n: i32) -> float32x4_t; + } + unsafe { _vcvtq_n_f32_u32(a, N) } +} +#[doc = "Floating-point convert to signed fixed-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s16_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcvt", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - 
assert_instr(eor) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + assert_instr(fcvtzs, N = 1) )] +#[rustc_legacy_const_generics(1)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vcvt_n_s16_f16(a: float16x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfp2fxs.v4i16.v4f16" + )] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfp2fxs.v4i16.v4f16" + )] + fn _vcvt_n_s16_f16(a: float16x4_t, n: i32) -> int16x4_t; + } + unsafe { _vcvt_n_s16_f16(a, N) } +} +#[doc = "Floating-point convert to signed fixed-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s16_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcvt", N = 1))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fcvtzs, N = 1) )] -pub fn veorq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe { simd_xor(a, b) } +#[rustc_legacy_const_generics(1)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vcvtq_n_s16_f16(a: float16x8_t) -> int16x8_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfp2fxs.v8i16.v8f16" + )] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfp2fxs.v8i16.v8f16" + )] + fn _vcvtq_n_s16_f16(a: float16x8_t, n: i32) -> int16x8_t; + } + unsafe { _vcvtq_n_s16_f16(a, N) } } -#[doc = "Vector bitwise exclusive or 
(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u32)"] +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s32_f32)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfp2fxs.v2i32.v2f32" + )] + fn _vcvt_n_s32_f32(a: float32x2_t, n: i32) -> int32x2_t; + } + unsafe { _vcvt_n_s32_f32(a, N) } +} +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s32_f32)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vcvtq_n_s32_f32(a: float32x4_t) -> int32x4_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfp2fxs.v4i32.v4f32" + )] + fn _vcvtq_n_s32_f32(a: float32x4_t, n: i32) -> int32x4_t; + } + unsafe { _vcvtq_n_s32_f32(a, N) } +} +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s32_f32)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg(not(target_arch = 
"arm"))] +#[cfg_attr(test, assert_instr(fcvtzs, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i32.v2f32" + )] + fn _vcvt_n_s32_f32(a: float32x2_t, n: i32) -> int32x2_t; + } + unsafe { _vcvt_n_s32_f32(a, N) } +} +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s32_f32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(fcvtzs, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vcvtq_n_s32_f32(a: float32x4_t) -> int32x4_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfp2fxs.v4i32.v4f32" + )] + fn _vcvtq_n_s32_f32(a: float32x4_t, n: i32) -> int32x4_t; + } + unsafe { _vcvtq_n_s32_f32(a, N) } +} +#[doc = "Fixed-point convert to unsigned fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u16_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcvt", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + assert_instr(fcvtzu, N = 1) )] +#[rustc_legacy_const_generics(1)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub 
fn vcvt_n_u16_f16(a: float16x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfp2fxu.v4i16.v4f16" + )] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfp2fxu.v4i16.v4f16" + )] + fn _vcvt_n_u16_f16(a: float16x4_t, n: i32) -> uint16x4_t; + } + unsafe { _vcvt_n_u16_f16(a, N) } +} +#[doc = "Fixed-point convert to unsigned fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u16_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcvt", N = 1))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fcvtzu, N = 1) )] -pub fn veor_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe { simd_xor(a, b) } +#[rustc_legacy_const_generics(1)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vcvtq_n_u16_f16(a: float16x8_t) -> uint16x8_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfp2fxu.v8i16.v8f16" + )] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfp2fxu.v8i16.v8f16" + )] + fn _vcvtq_n_u16_f16(a: float16x8_t, n: i32) -> uint16x8_t; + } + unsafe { _vcvtq_n_u16_f16(a, N) } } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u32)"] +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u32_f32)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfp2fxu.v2i32.v2f32" + )] + fn _vcvt_n_u32_f32(a: float32x2_t, n: i32) -> uint32x2_t; + } + unsafe { _vcvt_n_u32_f32(a, N) } +} +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u32_f32)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfp2fxu.v4i32.v4f32" + )] + fn _vcvtq_n_u32_f32(a: float32x4_t, n: i32) -> uint32x4_t; + } + unsafe { _vcvtq_n_u32_f32(a, N) } +} +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u32_f32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(fcvtzu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = 
"arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i32.v2f32" + )] + fn _vcvt_n_u32_f32(a: float32x2_t, n: i32) -> uint32x2_t; + } + unsafe { _vcvt_n_u32_f32(a, N) } +} +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u32_f32)"] #[inline] #[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(fcvtzu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfp2fxu.v4i32.v4f32" + )] + fn _vcvtq_n_u32_f32(a: float32x4_t, n: i32) -> uint32x4_t; + } + unsafe { _vcvtq_n_u32_f32(a, N) } +} +#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_s16_f16)"] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + assert_instr(fcvtzs) )] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vcvt_s16_f16(a: float16x4_t) -> int16x4_t { + unsafe { simd_cast(a) } +} +#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s16_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fcvtzs) )] -pub fn veorq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe { simd_xor(a, b) } +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vcvtq_s16_f16(a: float16x8_t) -> int16x8_t { + unsafe { simd_cast(a) } } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u64)"] +#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_s32_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(fcvtzs) )] #[cfg_attr( not(target_arch = "arm"), @@ -8529,18 +9087,26 @@ pub fn veorq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn veor_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - unsafe { simd_xor(a, b) } +pub fn vcvt_s32_f32(a: float32x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.fptosi.sat.v2i32.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fptosi.sat.v2i32.v2f32" + )] + fn _vcvt_s32_f32(a: float32x2_t) -> int32x2_t; + } + unsafe { _vcvt_s32_f32(a) } } -#[doc = "Vector bitwise exclusive 
or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u64)"] +#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s32_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(fcvtzs) )] #[cfg_attr( not(target_arch = "arm"), @@ -8550,44 +9116,55 @@ pub fn veor_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn veorq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - unsafe { simd_xor(a, b) } +pub fn vcvtq_s32_f32(a: float32x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.fptosi.sat.v4i32.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fptosi.sat.v4i32.v4f32" + )] + fn _vcvtq_s32_f32(a: float32x4_t) -> int32x4_t; + } + unsafe { _vcvtq_s32_f32(a) } } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_f16)"] +#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_u16_f16)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vcvt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 3) + assert_instr(fcvtzu) )] -#[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vext_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { - static_assert_uimm_bits!(N, 2); - unsafe { - match N & 0b11 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), - _ => unreachable_unchecked(), - } - } +pub fn vcvt_u16_f16(a: float16x4_t) -> uint16x4_t { + unsafe { simd_cast(a) } } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_f32)"] +#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u16_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fcvtzu) +)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vcvtq_u16_f16(a: float16x8_t) -> uint16x8_t { + unsafe { simd_cast(a) } +} +#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_u32_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( all(test, any(target_arch = "aarch64", 
target_arch = "arm64ec")), - assert_instr(ext, N = 1) + assert_instr(fcvtzu) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8596,27 +9173,27 @@ pub fn vext_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vext_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - static_assert_uimm_bits!(N, 1); - unsafe { - match N & 0b1 { - 0 => simd_shuffle!(a, b, [0, 1]), - 1 => simd_shuffle!(a, b, [1, 2]), - _ => unreachable_unchecked(), - } +pub fn vcvt_u32_f32(a: float32x2_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.fptoui.sat.v2i32.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fptoui.sat.v2i32.v2f32" + )] + fn _vcvt_u32_f32(a: float32x2_t) -> uint32x2_t; } + unsafe { _vcvt_u32_f32(a) } } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s32)"] +#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u32_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 1) + assert_instr(fcvtzu) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8625,294 +9202,292 @@ pub fn vext_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vext_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert_uimm_bits!(N, 1); - unsafe { - match N & 0b1 { - 0 => simd_shuffle!(a, b, [0, 1]), - 1 => simd_shuffle!(a, b, [1, 2]), - _ => unreachable_unchecked(), - } +pub fn vcvtq_u32_f32(a: float32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.fptoui.sat.v4i32.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fptoui.sat.v4i32.v4f32" + )] + fn _vcvtq_u32_f32(a: float32x4_t) -> uint32x4_t; } + unsafe { _vcvtq_u32_f32(a) } } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u32)"] +#[doc = "Dot product arithmetic (indexed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_lane_s32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 1) + assert_instr(sdot, LANE = 0) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_dotprod", issue = "117224") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vext_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - static_assert_uimm_bits!(N, 1); +pub fn vdot_lane_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t { + 
static_assert_uimm_bits!(LANE, 1); unsafe { - match N & 0b1 { - 0 => simd_shuffle!(a, b, [0, 1]), - 1 => simd_shuffle!(a, b, [1, 2]), - _ => unreachable_unchecked(), - } - } + let c: int32x2_t = transmute(c); + let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); + vdot_s32(a, b, transmute(c)) + } } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s8)"] +#[doc = "Dot product arithmetic (indexed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_lane_s32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 7) + assert_instr(sdot, LANE = 0) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_dotprod", issue = "117224") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vext_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - static_assert_uimm_bits!(N, 3); +pub fn vdotq_lane_s32(a: int32x4_t, b: int8x16_t, c: int8x8_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 1); unsafe { - match N & 0b111 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), - 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), - 5 => 
simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), - 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), - 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), - _ => unreachable_unchecked(), - } + let c: int32x2_t = transmute(c); + let c: int32x4_t = + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vdotq_s32(a, b, transmute(c)) } } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s16)"] +#[doc = "Dot product arithmetic (indexed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_lane_u32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 7) + assert_instr(udot, LANE = 0) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_dotprod", issue = "117224") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vextq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(N, 3); +pub fn vdot_lane_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 1); unsafe { - match N & 0b111 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), 
- 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), - 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), - 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), - 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), - _ => unreachable_unchecked(), - } + let c: uint32x2_t = transmute(c); + let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); + vdot_u32(a, b, transmute(c)) } } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u8)"] +#[doc = "Dot product arithmetic (indexed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_lane_u32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 7) + assert_instr(udot, LANE = 0) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_dotprod", issue = "117224") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vext_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - static_assert_uimm_bits!(N, 3); +pub fn vdotq_lane_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x8_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 1); unsafe { - match N & 0b111 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), - 3 => 
simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), - 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), - 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), - 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), - 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), - _ => unreachable_unchecked(), - } + let c: uint32x2_t = transmute(c); + let c: uint32x4_t = + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vdotq_u32(a, b, transmute(c)) } } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u16)"] +#[doc = "Dot product arithmetic (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_s32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 7) + assert_instr(sdot) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_dotprod", issue = "117224") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vextq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(N, 3); - unsafe { - match N & 0b111 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), - 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), - 5 
=> simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), - 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), - 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), - _ => unreachable_unchecked(), - } +pub fn vdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sdot.v2i32.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sdot.v2i32.v8i8" + )] + fn _vdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t; } + unsafe { _vdot_s32(a, b, c) } } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_p8)"] +#[doc = "Dot product arithmetic (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_s32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 7) + assert_instr(sdot) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_dotprod", issue = "117224") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vext_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - static_assert_uimm_bits!(N, 3); - unsafe { - match N & 0b111 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 
6, 7, 8, 9]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), - 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), - 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), - 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), - 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), - _ => unreachable_unchecked(), - } +pub fn vdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sdot.v4i32.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sdot.v4i32.v16i8" + )] + fn _vdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; } + unsafe { _vdotq_s32(a, b, c) } } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p16)"] +#[doc = "Dot product arithmetic (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_u32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 7) + assert_instr(udot) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_dotprod", issue = "117224") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vextq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - static_assert_uimm_bits!(N, 3); - unsafe { - match N & 0b111 { - 0 
=> simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), - 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), - 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), - 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), - 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), - _ => unreachable_unchecked(), - } +pub fn vdot_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.udot.v2i32.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.udot.v2i32.v8i8" + )] + fn _vdot_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t; } + unsafe { _vdot_u32(a, b, c) } } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f16)"] +#[doc = "Dot product arithmetic (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_u32)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(udot) +)] +#[cfg_attr( + not(target_arch = "arm"), + unstable(feature = "stdarch_neon_dotprod", issue = "117224") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vdotq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.udot.v4i32.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + 
link_name = "llvm.aarch64.neon.udot.v4i32.v16i8" + )] + fn _vdotq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t; + } + unsafe { _vdotq_u32(a, b, c) } +} +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f16)"] #[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 7) + assert_instr(dup, N = 2) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vextq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { - static_assert_uimm_bits!(N, 3); +pub fn vdup_lane_f16(a: float16x4_t) -> float16x4_t { + static_assert_uimm_bits!(N, 2); + unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } +} +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vdupq_lane_f16(a: float16x4_t) -> float16x8_t { + static_assert_uimm_bits!(N, 2); unsafe { - match N & 0b111 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), - 3 => 
simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), - 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), - 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), - 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), - 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), - _ => unreachable_unchecked(), - } + simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ) } } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 3) + assert_instr(dup, N = 1) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8921,29 +9496,21 @@ pub fn vextq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vextq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - static_assert_uimm_bits!(N, 2); - unsafe { - match N & 0b11 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), - _ => unreachable_unchecked(), - } - } +pub fn vdup_lane_f32(a: float32x2_t) -> float32x2_t { + static_assert_uimm_bits!(N, 1); + unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) } } -#[doc = 
"Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 3) + assert_instr(dup, N = 1) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8952,29 +9519,21 @@ pub fn vextq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vext_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(N, 2); - unsafe { - match N & 0b11 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), - _ => unreachable_unchecked(), - } - } +pub fn vdup_lane_s32(a: int32x2_t) -> int32x2_t { + static_assert_uimm_bits!(N, 1); + unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) } } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, 
target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 3) + assert_instr(dup, N = 1) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8983,29 +9542,21 @@ pub fn vext_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vextq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert_uimm_bits!(N, 2); - unsafe { - match N & 0b11 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), - _ => unreachable_unchecked(), - } - } +pub fn vdup_lane_u32(a: uint32x2_t) -> uint32x2_t { + static_assert_uimm_bits!(N, 1); + unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) } } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 3) + assert_instr(dup, N = 1) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9014,29 +9565,21 @@ pub fn 
vextq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vext_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(N, 2); - unsafe { - match N & 0b11 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), - _ => unreachable_unchecked(), - } - } +pub fn vdupq_lane_f32(a: float32x2_t) -> float32x4_t { + static_assert_uimm_bits!(N, 1); + unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 3) + assert_instr(dup, N = 1) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9045,29 +9588,21 @@ pub fn vext_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vextq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - static_assert_uimm_bits!(N, 2); - unsafe { - match N & 0b11 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), - 3 => simd_shuffle!(a, b, [3, 
4, 5, 6]), - _ => unreachable_unchecked(), - } - } +pub fn vdupq_lane_s32(a: int32x2_t) -> int32x4_t { + static_assert_uimm_bits!(N, 1); + unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_p16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 3) + assert_instr(dup, N = 1) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9076,29 +9611,21 @@ pub fn vextq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vext_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - static_assert_uimm_bits!(N, 2); - unsafe { - match N & 0b11 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), - _ => unreachable_unchecked(), - } - } +pub fn vdupq_lane_u32(a: uint32x2_t) -> uint32x4_t { + static_assert_uimm_bits!(N, 1); + unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s64)"] +#[doc = "Set all vector lanes to the same value"] +#[doc 
= "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 1) + assert_instr(dup, N = 2) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9107,27 +9634,21 @@ pub fn vext_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vextq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - static_assert_uimm_bits!(N, 1); - unsafe { - match N & 0b1 { - 0 => simd_shuffle!(a, b, [0, 1]), - 1 => simd_shuffle!(a, b, [1, 2]), - _ => unreachable_unchecked(), - } - } +pub fn vdup_lane_p16(a: poly16x4_t) -> poly16x4_t { + static_assert_uimm_bits!(N, 2); + unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u64)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 1) + assert_instr(dup, N = 2) )] -#[rustc_legacy_const_generics(2)] 
+#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9136,27 +9657,21 @@ pub fn vextq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vextq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert_uimm_bits!(N, 1); - unsafe { - match N & 0b1 { - 0 => simd_shuffle!(a, b, [0, 1]), - 1 => simd_shuffle!(a, b, [1, 2]), - _ => unreachable_unchecked(), - } - } +pub fn vdup_lane_s16(a: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(N, 2); + unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s8)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 15) + assert_instr(dup, N = 2) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9165,101 +9680,21 @@ pub fn vextq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vextq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - static_assert_uimm_bits!(N, 4); - unsafe { - match N & 0b1111 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), - 1 => 
simd_shuffle!( - a, - b, - [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] - ), - 2 => simd_shuffle!( - a, - b, - [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17] - ), - 3 => simd_shuffle!( - a, - b, - [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18] - ), - 4 => simd_shuffle!( - a, - b, - [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] - ), - 5 => simd_shuffle!( - a, - b, - [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20] - ), - 6 => simd_shuffle!( - a, - b, - [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21] - ), - 7 => simd_shuffle!( - a, - b, - [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22] - ), - 8 => simd_shuffle!( - a, - b, - [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] - ), - 9 => simd_shuffle!( - a, - b, - [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] - ), - 10 => simd_shuffle!( - a, - b, - [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25] - ), - 11 => simd_shuffle!( - a, - b, - [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26] - ), - 12 => simd_shuffle!( - a, - b, - [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27] - ), - 13 => simd_shuffle!( - a, - b, - [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] - ), - 14 => simd_shuffle!( - a, - b, - [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29] - ), - 15 => simd_shuffle!( - a, - b, - [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30] - ), - _ => unreachable_unchecked(), - } - } +pub fn vdup_lane_u16(a: uint16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(N, 2); + unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u8)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 15) + assert_instr(dup, N = 2) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9268,101 +9703,27 @@ pub fn vextq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vextq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - static_assert_uimm_bits!(N, 4); +pub fn vdupq_lane_p16(a: poly16x4_t) -> poly16x8_t { + static_assert_uimm_bits!(N, 2); unsafe { - match N & 0b1111 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), - 1 => simd_shuffle!( - a, - b, - [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] - ), - 2 => simd_shuffle!( - a, - b, - [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17] - ), - 3 => simd_shuffle!( - a, - b, - [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18] - ), - 4 => simd_shuffle!( - a, - b, - [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] - ), - 5 => simd_shuffle!( - a, - b, - [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20] - ), - 6 => simd_shuffle!( - a, - b, - [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21] - ), - 7 => simd_shuffle!( - a, - b, - [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22] - ), - 8 => simd_shuffle!( - a, - b, - [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] - ), - 9 => simd_shuffle!( - a, - b, - [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 
19, 20, 21, 22, 23, 24] - ), - 10 => simd_shuffle!( - a, - b, - [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25] - ), - 11 => simd_shuffle!( - a, - b, - [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26] - ), - 12 => simd_shuffle!( - a, - b, - [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27] - ), - 13 => simd_shuffle!( - a, - b, - [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] - ), - 14 => simd_shuffle!( - a, - b, - [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29] - ), - 15 => simd_shuffle!( - a, - b, - [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30] - ), - _ => unreachable_unchecked(), - } + simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ) } } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p8)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 15) + assert_instr(dup, N = 2) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9371,138 +9732,56 @@ pub fn vextq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vextq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - static_assert_uimm_bits!(N, 4); +pub fn 
vdupq_lane_s16(a: int16x4_t) -> int16x8_t { + static_assert_uimm_bits!(N, 2); unsafe { - match N & 0b1111 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), - 1 => simd_shuffle!( - a, - b, - [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] - ), - 2 => simd_shuffle!( - a, - b, - [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17] - ), - 3 => simd_shuffle!( - a, - b, - [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18] - ), - 4 => simd_shuffle!( - a, - b, - [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] - ), - 5 => simd_shuffle!( - a, - b, - [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20] - ), - 6 => simd_shuffle!( - a, - b, - [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21] - ), - 7 => simd_shuffle!( - a, - b, - [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22] - ), - 8 => simd_shuffle!( - a, - b, - [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] - ), - 9 => simd_shuffle!( - a, - b, - [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] - ), - 10 => simd_shuffle!( - a, - b, - [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25] - ), - 11 => simd_shuffle!( - a, - b, - [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26] - ), - 12 => simd_shuffle!( - a, - b, - [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27] - ), - 13 => simd_shuffle!( - a, - b, - [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] - ), - 14 => simd_shuffle!( - a, - b, - [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29] - ), - 15 => simd_shuffle!( - a, - b, - [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30] - ), - _ => unreachable_unchecked(), - } + simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ) } } -#[doc = "Floating-point fused Multiply-Add to accumulator (vector)"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u16)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmla) + assert_instr(dup, N = 2) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vfma_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.v4f16")] - #[cfg_attr(target_arch = "arm", link_name = "llvm.fma.v4f16")] - fn _vfma_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t; - } - unsafe { _vfma_f16(b, c, a) } -} -#[doc = "Floating-point fused Multiply-Add to accumulator (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] +#[rustc_legacy_const_generics(1)] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmla) + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vfmaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.fma.v8f16")] - #[cfg_attr(target_arch = "arm", link_name = "llvm.fma.v8f16")] - fn _vfmaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t; +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vdupq_lane_u16(a: uint16x4_t) -> uint16x8_t { + static_assert_uimm_bits!(N, 2); + unsafe { + simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ) } - unsafe { _vfmaq_f16(b, c, a) } } -#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p8)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmla) + assert_instr(dup, N = 4) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9511,24 +9790,27 @@ pub fn vfmaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vfma_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.v2f32")] - #[cfg_attr(target_arch = "arm", link_name = "llvm.fma.v2f32")] - fn _vfma_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; +pub fn vdup_lane_p8(a: 
poly8x8_t) -> poly8x8_t { + static_assert_uimm_bits!(N, 3); + unsafe { + simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ) } - unsafe { _vfma_f32(b, c, a) } } -#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s8)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmla) + assert_instr(dup, N = 4) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9537,24 +9819,27 @@ pub fn vfma_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vfmaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.v4f32")] - #[cfg_attr(target_arch = "arm", link_name = "llvm.fma.v4f32")] - fn _vfmaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; +pub fn vdup_lane_s8(a: int8x8_t) -> int8x8_t { + static_assert_uimm_bits!(N, 3); + unsafe { + simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ) } - unsafe { _vfmaq_f32(b, c, a) } } -#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u8)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmla) + assert_instr(dup, N = 4) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9563,19 +9848,27 @@ pub fn vfmaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vfma_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { - vfma_f32(a, b, vdup_n_f32_vfp4(c)) +pub fn vdup_lane_u8(a: uint8x8_t) -> uint8x8_t { + static_assert_uimm_bits!(N, 3); + unsafe { + simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ) + } } -#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p8)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vdup.8", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmla) + assert_instr(dup, N = 4) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9584,55 +9877,30 @@ pub fn vfma_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vfmaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { - vfmaq_f32(a, b, vdupq_n_f32_vfp4(c)) -} -#[doc = "Floating-point fused multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmls) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vfms_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t { - unsafe { - let b: float16x4_t = simd_neg(b); - vfma_f16(a, b, c) - } -} -#[doc = "Floating-point fused multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmls) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vfmsq_f16(a: float16x8_t, b: float16x8_t, c: 
float16x8_t) -> float16x8_t { +pub fn vdupq_lane_p8(a: poly8x8_t) -> poly8x16_t { + static_assert_uimm_bits!(N, 3); unsafe { - let b: float16x8_t = simd_neg(b); - vfmaq_f16(a, b, c) + simd_shuffle!( + a, + a, + [ + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 + ] + ) } } -#[doc = "Floating-point fused multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s8)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmls) + assert_instr(dup, N = 4) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9641,22 +9909,30 @@ pub fn vfmsq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vfms_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { +pub fn vdupq_lane_s8(a: int8x8_t) -> int8x16_t { + static_assert_uimm_bits!(N, 3); unsafe { - let b: float32x2_t = simd_neg(b); - vfma_f32(a, b, c) + simd_shuffle!( + a, + a, + [ + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 + ] + ) } } -#[doc = "Floating-point fused multiply-subtract from 
accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u8)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmls) + assert_instr(dup, N = 4) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9665,22 +9941,30 @@ pub fn vfms_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vfmsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { +pub fn vdupq_lane_u8(a: uint8x8_t) -> uint8x16_t { + static_assert_uimm_bits!(N, 3); unsafe { - let b: float32x4_t = simd_neg(b); - vfmaq_f32(a, b, c) + simd_shuffle!( + a, + a, + [ + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 + ] + ) } } -#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s64)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(vfms))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, N = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmls) + assert_instr(nop, N = 0) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9689,19 +9973,21 @@ pub fn vfmsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vfms_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { - vfms_f32(a, b, vdup_n_f32_vfp4(c)) +pub fn vdup_lane_s64(a: int64x1_t) -> int64x1_t { + static_assert!(N == 0); + a } -#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f32)"] -#[inline] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u64)"] +#[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, N = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmls) + assert_instr(nop, N = 0) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9710,71 +9996,59 @@ pub fn vfms_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vfmsq_n_f32(a: float32x4_t, b: 
float32x4_t, c: f32) -> float32x4_t { - vfmsq_f32(a, b, vdupq_n_f32_vfp4(c)) -} -#[doc = "Duplicate vector element to vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vget_high_f16(a: float16x8_t) -> float16x4_t { - unsafe { simd_shuffle!(a, a, [4, 5, 6, 7]) } -} -#[doc = "Duplicate vector element to vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vget_low_f16(a: float16x8_t) -> float16x4_t { - unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) } +pub fn vdup_lane_u64(a: uint64x1_t) -> uint64x1_t { + static_assert!(N == 0); + a } -#[doc = "Duplicate vector element to scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_f16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f16)"] #[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[target_feature(enable = "neon,fp16")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(dup, N = 4) )] #[rustc_legacy_const_generics(1)] +#[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = 
"136306")] -pub fn vget_lane_f16(a: float16x4_t) -> f16 { - static_assert_uimm_bits!(LANE, 2); - unsafe { simd_extract!(a, LANE as u32) } +pub fn vdup_laneq_f16(a: float16x8_t) -> float16x4_t { + static_assert_uimm_bits!(N, 3); + unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } } -#[doc = "Duplicate vector element to scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_f16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f16)"] #[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[target_feature(enable = "neon,fp16")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(dup, N = 4) )] #[rustc_legacy_const_generics(1)] +#[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vgetq_lane_f16(a: float16x8_t) -> f16 { - static_assert_uimm_bits!(LANE, 3); - unsafe { simd_extract!(a, LANE as u32) } +pub fn vdupq_laneq_f16(a: float16x8_t) -> float16x8_t { + static_assert_uimm_bits!(N, 3); + unsafe { + simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ) + } } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s8)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vhadd.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shadd) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9783,27 +10057,21 @@ pub fn vgetq_lane_f16(a: float16x8_t) -> f16 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shadd.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v8i8")] - fn _vhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - unsafe { _vhadd_s8(a, b) } +pub fn vdup_laneq_f32(a: float32x4_t) -> float32x2_t { + static_assert_uimm_bits!(N, 2); + unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) } } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s8)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shadd) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9812,27 +10080,21 @@ pub fn vhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shadd.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v16i8")] - fn _vhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - unsafe { _vhaddq_s8(a, b) } +pub fn vdup_laneq_s32(a: int32x4_t) -> int32x2_t { + static_assert_uimm_bits!(N, 2); + unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) } } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shadd) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9841,27 +10103,21 @@ pub fn vhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shadd.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v4i16")] - fn _vhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - unsafe { _vhadd_s16(a, b) } +pub fn vdup_laneq_u32(a: uint32x4_t) -> 
uint32x2_t { + static_assert_uimm_bits!(N, 2); + unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) } } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shadd) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9870,27 +10126,21 @@ pub fn vhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shadd.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v8i16")] - fn _vhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - unsafe { _vhaddq_s16(a, b) } +pub fn vdupq_laneq_f32(a: float32x4_t) -> float32x4_t { + static_assert_uimm_bits!(N, 2); + unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s32)"] #[inline] #[target_feature(enable 
= "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shadd) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9899,27 +10149,21 @@ pub fn vhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shadd.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v2i32")] - fn _vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - unsafe { _vhadd_s32(a, b) } +pub fn vdupq_laneq_s32(a: int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(N, 2); + unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shadd) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = 
"neon_intrinsics", since = "1.59.0") @@ -9928,27 +10172,21 @@ pub fn vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shadd.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v4i32")] - fn _vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - unsafe { _vhaddq_s32(a, b) } +pub fn vdupq_laneq_u32(a: uint32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(N, 2); + unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u8)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhadd) + assert_instr(dup, N = 4) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9957,27 +10195,21 @@ pub fn vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhadd.v8i8" - )] - #[cfg_attr(target_arch 
= "arm", link_name = "llvm.arm.neon.vhaddu.v8i8")] - fn _vhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - } - unsafe { _vhadd_u8(a, b) } +pub fn vdup_laneq_p16(a: poly16x8_t) -> poly16x4_t { + static_assert_uimm_bits!(N, 3); + unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u8)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhadd) + assert_instr(dup, N = 4) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9986,27 +10218,21 @@ pub fn vhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhadd.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v16i8")] - fn _vhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - } - unsafe { _vhaddq_u8(a, b) } +pub fn vdup_laneq_s16(a: int16x8_t) -> int16x4_t { + static_assert_uimm_bits!(N, 3); + unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u16)"] +#[doc = 
"Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhadd) + assert_instr(dup, N = 4) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10015,27 +10241,21 @@ pub fn vhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhadd.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v4i16")] - fn _vhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - } - unsafe { _vhadd_u16(a, b) } +pub fn vdup_laneq_u16(a: uint16x8_t) -> uint16x4_t { + static_assert_uimm_bits!(N, 3); + unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr( all(test, 
any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhadd) + assert_instr(dup, N = 4) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10044,27 +10264,27 @@ pub fn vhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhadd.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v8i16")] - fn _vhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; +pub fn vdupq_laneq_p16(a: poly16x8_t) -> poly16x8_t { + static_assert_uimm_bits!(N, 3); + unsafe { + simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ) } - unsafe { _vhaddq_u16(a, b) } } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhadd) + assert_instr(dup, N = 4) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10073,27 +10293,27 @@ pub fn vhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhadd.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v2i32")] - fn _vhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; +pub fn vdupq_laneq_s16(a: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(N, 3); + unsafe { + simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ) } - unsafe { _vhadd_u32(a, b) } } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhadd) + assert_instr(dup, N = 4) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10102,27 +10322,27 @@ pub fn vhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhadd.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v4i32")] - fn _vhaddq_u32(a: uint32x4_t, b: uint32x4_t) 
-> uint32x4_t; +pub fn vdupq_laneq_u16(a: uint16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(N, 3); + unsafe { + simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ) } - unsafe { _vhaddq_u32(a, b) } } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shsub) + assert_instr(dup, N = 8) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10131,27 +10351,27 @@ pub fn vhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shsub.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v4i16")] - fn _vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; +pub fn vdup_laneq_p8(a: poly8x16_t) -> poly8x8_t { + static_assert_uimm_bits!(N, 4); + unsafe { + simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ) } - unsafe { _vhsub_s16(a, b) } } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shsub) + assert_instr(dup, N = 8) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10160,27 +10380,27 @@ pub fn vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shsub.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v8i16")] - fn _vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; +pub fn vdup_laneq_s8(a: int8x16_t) -> int8x8_t { + static_assert_uimm_bits!(N, 4); + unsafe { + simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ) } - unsafe { _vhsubq_s16(a, b) } } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, 
target_arch = "arm"), assert_instr("vhsub.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shsub) + assert_instr(dup, N = 8) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10189,27 +10409,27 @@ pub fn vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shsub.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v2i32")] - fn _vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; +pub fn vdup_laneq_u8(a: uint8x16_t) -> uint8x8_t { + static_assert_uimm_bits!(N, 4); + unsafe { + simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ) } - unsafe { _vhsub_s32(a, b) } } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shsub) + assert_instr(dup, N = 8) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ 
-10218,27 +10438,30 @@ pub fn vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shsub.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v4i32")] - fn _vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; +pub fn vdupq_laneq_p8(a: poly8x16_t) -> poly8x16_t { + static_assert_uimm_bits!(N, 4); + unsafe { + simd_shuffle!( + a, + a, + [ + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 + ] + ) } - unsafe { _vhsubq_s32(a, b) } } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s8)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shsub) + assert_instr(dup, N = 8) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10247,27 +10470,30 @@ pub fn vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = 
"aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shsub.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v8i8")] - fn _vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; +pub fn vdupq_laneq_s8(a: int8x16_t) -> int8x16_t { + static_assert_uimm_bits!(N, 4); + unsafe { + simd_shuffle!( + a, + a, + [ + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 + ] + ) } - unsafe { _vhsub_s8(a, b) } } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s8)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shsub) + assert_instr(dup, N = 8) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10276,27 +10502,30 @@ pub fn vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shsub.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v16i8")] - fn _vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - unsafe { _vhsubq_s8(a, b) } -} -#[doc = "Signed halving subtract"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u8)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u8"))] -#[cfg_attr( +pub fn vdupq_laneq_u8(a: uint8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(N, 4); + unsafe { + simd_shuffle!( + a, + a, + [ + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 + ] + ) + } +} +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] +#[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhsub) + assert_instr(nop, N = 1) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10305,27 +10534,21 @@ pub fn vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhsub.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v8i8")] - fn _vhsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - } - unsafe { _vhsub_u8(a, b) } +pub fn vdup_laneq_s64(a: int64x2_t) -> int64x1_t { + static_assert_uimm_bits!(N, 1); + unsafe { transmute::(simd_extract!(a, N as u32)) } } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u8)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhsub) + assert_instr(nop, N = 1) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10334,26 +10557,47 @@ pub fn vhsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhsub.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v16i8")] - fn _vhsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - } - unsafe { _vhsubq_u8(a, b) } +pub fn vdup_laneq_u64(a: uint64x2_t) -> uint64x1_t { + static_assert_uimm_bits!(N, 1); + unsafe { transmute::(simd_extract!(a, N as u32)) } } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u16)"] +#[doc = "Create a new vector with all lanes set to a value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] +#[cfg_attr( + all(test, any(target_arch = 
"aarch64", target_arch = "arm64ec")), + assert_instr(dup) +)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vdup_n_f16(a: f16) -> float16x4_t { + float16x4_t::splat(a) +} +#[doc = "Create a new vector with all lanes set to a value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) +)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vdupq_n_f16(a: f16) -> float16x8_t { + float16x8_t::splat(a) +} +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhsub) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -10363,26 +10607,18 @@ pub fn vhsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhsub.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v4i16")] - fn _vhsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - } - unsafe { _vhsub_u16(a, b) } +pub fn 
vdup_n_f32(value: f32) -> float32x2_t { + float32x2_t::splat(value) } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u16)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_p16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhsub) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -10392,26 +10628,18 @@ pub fn vhsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhsub.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v8i16")] - fn _vhsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - } - unsafe { _vhsubq_u16(a, b) } +pub fn vdup_n_p16(value: p16) -> poly16x4_t { + poly16x4_t::splat(value) } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u32)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_p8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vdup.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhsub) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -10421,26 +10649,18 @@ pub fn vhsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhsub.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v2i32")] - fn _vhsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - } - unsafe { _vhsub_u32(a, b) } +pub fn vdup_n_p8(value: p8) -> poly8x8_t { + poly8x8_t::splat(value) } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u32)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhsub) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -10450,291 +10670,4539 @@ pub fn vhsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhsub.v4i32" - )] - #[cfg_attr(target_arch = "arm", 
link_name = "llvm.arm.neon.vhsubu.v4i32")] - fn _vhsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - } - unsafe { _vhsubq_u32(a, b) } +pub fn vdup_n_s16(value: i16) -> int16x4_t { + int16x4_t::splat(value) } -#[doc = "Load one single-element structure and replicate to all lanes of one register"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_s32)"] #[inline] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) + assert_instr(dup) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub unsafe fn vld1_dup_f16(ptr: *const f16) -> float16x4_t { - let x: float16x4_t = vld1_lane_f16::<0>(ptr, transmute(f16x4::splat(0.))); - simd_shuffle!(x, x, [0, 0, 0, 0]) +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vdup_n_s32(value: i32) -> int32x2_t { + int32x2_t::splat(value) } -#[doc = "Load one single-element structure and replicate to all lanes of one register"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_s64)"] #[inline] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) + assert_instr(fmov) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub unsafe fn vld1q_dup_f16(ptr: *const f16) -> float16x8_t { - let x: float16x8_t = vld1q_lane_f16::<0>(ptr, transmute(f16x8::splat(0.))); - simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vdup_n_s64(value: i64) -> int64x1_t { + int64x1_t::splat(value) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_s8)"] #[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] -pub unsafe fn vld1_f16(ptr: *const f16) -> float16x4_t { - transmute(vld1_v4f16( - ptr as *const i8, - crate::mem::align_of::() as i32, - )) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vdup_n_s8(value: i8) -> int8x8_t { + int8x8_t::splat(value) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_u16)"] #[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] -pub unsafe fn vld1_f16(ptr: *const f16) -> float16x4_t { - let ret_val: float16x4_t = transmute(vld1_v4f16( - ptr as *const i8, - crate::mem::align_of::() as i32, - )); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vdup_n_u16(value: u16) -> uint16x4_t { + uint16x4_t::splat(value) } -#[doc = "Load multiple single-element 
structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_u32)"] #[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] -pub unsafe fn vld1q_f16(ptr: *const f16) -> float16x8_t { - transmute(vld1q_v8f16( - ptr as *const i8, - crate::mem::align_of::() as i32, - )) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vdup_n_u32(value: u32) -> uint32x2_t { + uint32x2_t::splat(value) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_u64)"] #[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = 
"stdarch_neon_f16", issue = "136306")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] -pub unsafe fn vld1q_f16(ptr: *const f16) -> float16x8_t { - let ret_val: float16x8_t = transmute(vld1q_v8f16( - ptr as *const i8, - crate::mem::align_of::() as i32, - )); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmov) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vdup_n_u64(value: u64) -> uint64x1_t { + uint64x1_t::splat(value) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f16_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_u8)"] #[inline] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(dup) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub unsafe fn vld1_f16_x2(a: *const f16) -> float16x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = 
"llvm.aarch64.neon.ld1x2.v4f16.p0f16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v4f16.p0f16")] - fn _vld1_f16_x2(a: *const f16) -> float16x4x2_t; - } - _vld1_f16_x2(a) +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vdup_n_u8(value: u8) -> uint8x8_t { + uint8x8_t::splat(value) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f16_x3)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_f32)"] #[inline] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(dup) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub unsafe fn vld1_f16_x3(a: *const f16) -> float16x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v4f16.p0f16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v4f16.p0f16")] - fn _vld1_f16_x3(a: *const f16) -> float16x4x3_t; - } - _vld1_f16_x3(a) +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn 
vdupq_n_f32(value: f32) -> float32x4_t { + float32x4_t::splat(value) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f16_x4)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_p16)"] #[inline] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(dup) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub unsafe fn vld1_f16_x4(a: *const f16) -> float16x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v4f16.p0f16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v4f16.p0f16")] - fn _vld1_f16_x4(a: *const f16) -> float16x4x4_t; - } - _vld1_f16_x4(a) +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vdupq_n_p16(value: p16) -> poly16x8_t { + poly16x8_t::splat(value) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f16_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_p8)"] #[inline] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(dup) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub unsafe fn vld1q_f16_x2(a: *const f16) -> float16x8x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v8f16.p0f16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v8f16.p0f16")] - fn _vld1q_f16_x2(a: *const f16) -> float16x8x2_t; - } - _vld1q_f16_x2(a) +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vdupq_n_p8(value: u8) -> poly8x16_t { + poly8x16_t::splat(value) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f16_x3)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_s16)"] #[inline] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - 
assert_instr(ld1) + assert_instr(dup) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub unsafe fn vld1q_f16_x3(a: *const f16) -> float16x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v8f16.p0f16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v8f16.p0f16")] - fn _vld1q_f16_x3(a: *const f16) -> float16x8x3_t; - } - _vld1q_f16_x3(a) +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vdupq_n_s16(value: i16) -> int16x8_t { + int16x8_t::splat(value) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f16_x4)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_s32)"] #[inline] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(dup) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub unsafe fn vld1q_f16_x4(a: *const f16) -> float16x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v8f16.p0f16" - )] - #[cfg_attr(target_arch = "arm", link_name = 
"llvm.arm.neon.vld1x4.v8f16.p0f16")] - fn _vld1q_f16_x4(a: *const f16) -> float16x8x4_t; - } - _vld1q_f16_x4(a) +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vdupq_n_s32(value: i32) -> int32x4_t { + int32x4_t::splat(value) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_s64)"] #[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] -pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t { - transmute(vld1_v2f32( - ptr as *const i8, - crate::mem::align_of::() as i32, - )) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vdupq_n_s64(value: i64) -> int64x2_t { + int64x2_t::splat(value) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32)"] -#[doc = "## 
Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_s8)"] #[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vdupq_n_s8(value: i8) -> int8x16_t { + int8x16_t::splat(value) +} +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_u16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vdupq_n_u16(value: u16) -> uint16x8_t { + uint16x8_t::splat(value) +} +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_u32)"] +#[inline] +#[target_feature(enable = "neon")] 
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vdupq_n_u32(value: u32) -> uint32x4_t { + uint32x4_t::splat(value) +} +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_u64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vdupq_n_u64(value: u64) -> uint64x2_t { + uint64x2_t::splat(value) +} +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_u8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vdupq_n_u8(value: u8) -> uint8x16_t { + 
uint8x16_t::splat(value) +} +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_f32_vfp4)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +fn vdup_n_f32_vfp4(value: f32) -> float32x2_t { + float32x2_t::splat(value) +} +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_f32_vfp4)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +fn vdupq_n_f32_vfp4(value: f32) -> float32x4_t { + float32x4_t::splat(value) +} +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup, N = 
0) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vdupq_lane_s64(a: int64x1_t) -> int64x2_t { + static_assert!(N == 0); + unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) } +} +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup, N = 0) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vdupq_lane_u64(a: uint64x1_t) -> uint64x2_t { + static_assert!(N == 0); + unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) } +} +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup, N = 1) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vdupq_laneq_s64(a: int64x2_t) -> 
int64x2_t { + static_assert_uimm_bits!(N, 1); + unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) } +} +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup, N = 1) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vdupq_laneq_u64(a: uint64x2_t) -> uint64x2_t { + static_assert_uimm_bits!(N, 1); + unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) } +} +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(eor) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn veor_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { simd_xor(a, b) } +} +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(veor))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(eor) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn veorq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { simd_xor(a, b) } +} +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(eor) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn veor_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { simd_xor(a, b) } +} +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(eor) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn veorq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { simd_xor(a, b) } +} +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(eor) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn veor_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { simd_xor(a, b) } +} +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(eor) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn veorq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { simd_xor(a, b) } +} +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(eor) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + 
target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn veor_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + unsafe { simd_xor(a, b) } +} +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(eor) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn veorq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe { simd_xor(a, b) } +} +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(eor) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn veor_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { simd_xor(a, b) } +} +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(veor))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(eor) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn veorq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { simd_xor(a, b) } +} +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(eor) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn veor_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { simd_xor(a, b) } +} +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(eor) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn veorq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { simd_xor(a, b) } +} +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(eor) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn veor_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { simd_xor(a, b) } +} +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(eor) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn veorq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { simd_xor(a, b) } +} +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(eor) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + 
target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn veor_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + unsafe { simd_xor(a, b) } +} +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(eor) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn veorq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { simd_xor(a, b) } +} +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 3) +)] +#[rustc_legacy_const_generics(2)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vext_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { + static_assert_uimm_bits!(N, 2); + unsafe { + match N & 0b11 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), + _ => unreachable_unchecked(), + } + } +} +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_f32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vext_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + static_assert_uimm_bits!(N, 1); + unsafe { + match N & 0b1 { + 0 => simd_shuffle!(a, b, [0, 1]), + 1 => simd_shuffle!(a, b, [1, 2]), + _ => unreachable_unchecked(), + } + } +} +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vext_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert_uimm_bits!(N, 1); + unsafe { + match N & 0b1 { + 0 => simd_shuffle!(a, b, [0, 1]), + 1 => simd_shuffle!(a, b, [1, 2]), + _ => unreachable_unchecked(), + } + } +} +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u32)"] +#[inline] 
+#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vext_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + static_assert_uimm_bits!(N, 1); + unsafe { + match N & 0b1 { + 0 => simd_shuffle!(a, b, [0, 1]), + 1 => simd_shuffle!(a, b, [1, 2]), + _ => unreachable_unchecked(), + } + } +} +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, N = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, N = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vext_s64(a: int64x1_t, _b: int64x1_t) -> int64x1_t { + static_assert!(N == 0); + a +} +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(nop, N = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, N = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vext_u64(a: uint64x1_t, _b: uint64x1_t) -> uint64x1_t { + static_assert!(N == 0); + a +} +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 7) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vext_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + static_assert_uimm_bits!(N, 3); + unsafe { + match N & 0b111 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), + 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), + 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), + 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), + 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), + _ => unreachable_unchecked(), + } + } +} +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s16)"] +#[inline] 
+#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 7) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vextq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(N, 3); + unsafe { + match N & 0b111 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), + 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), + 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), + 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), + 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), + _ => unreachable_unchecked(), + } + } +} +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 7) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vext_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + static_assert_uimm_bits!(N, 3); + unsafe { + match N & 0b111 { + 0 => 
simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), + 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), + 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), + 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), + 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), + _ => unreachable_unchecked(), + } + } +} +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 7) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vextq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(N, 3); + unsafe { + match N & 0b111 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), + 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), + 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), + 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), + 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), + _ => unreachable_unchecked(), + } + } +} +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_p8)"] +#[inline] 
+#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 7) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vext_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + static_assert_uimm_bits!(N, 3); + unsafe { + match N & 0b111 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), + 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), + 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), + 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), + 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), + _ => unreachable_unchecked(), + } + } +} +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 7) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vextq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + static_assert_uimm_bits!(N, 3); + unsafe { + match N & 0b111 { + 0 => 
simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), + 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), + 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), + 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), + 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), + _ => unreachable_unchecked(), + } + } +} +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 7) +)] +#[rustc_legacy_const_generics(2)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vextq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { + static_assert_uimm_bits!(N, 3); + unsafe { + match N & 0b111 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), + 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), + 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), + 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), + 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), + _ => unreachable_unchecked(), + } + } +} +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vext.8", N = 3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 3) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vextq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + static_assert_uimm_bits!(N, 2); + unsafe { + match N & 0b11 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), + _ => unreachable_unchecked(), + } + } +} +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 3) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vext_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(N, 2); + unsafe { + match N & 0b11 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), + _ => unreachable_unchecked(), + } + } +} +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s32)"] +#[inline] +#[target_feature(enable = "neon")] 
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 3) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vextq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(N, 2); + unsafe { + match N & 0b11 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), + _ => unreachable_unchecked(), + } + } +} +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 3) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vext_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(N, 2); + unsafe { + match N & 0b11 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), + _ => unreachable_unchecked(), + } + } +} +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 3) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vextq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(N, 2); + unsafe { + match N & 0b11 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), + _ => unreachable_unchecked(), + } + } +} +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_p16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 3) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vext_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + static_assert_uimm_bits!(N, 2); + unsafe { + match N & 0b11 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), + _ => 
unreachable_unchecked(), + } + } +} +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vextq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + static_assert_uimm_bits!(N, 1); + unsafe { + match N & 0b1 { + 0 => simd_shuffle!(a, b, [0, 1]), + 1 => simd_shuffle!(a, b, [1, 2]), + _ => unreachable_unchecked(), + } + } +} +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vextq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + static_assert_uimm_bits!(N, 1); + unsafe { + match N & 0b1 { + 0 => simd_shuffle!(a, b, [0, 1]), + 1 => simd_shuffle!(a, b, [1, 2]), + _ => unreachable_unchecked(), + } + } +} +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 15) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vextq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + static_assert_uimm_bits!(N, 4); + unsafe { + match N & 0b1111 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), + 1 => simd_shuffle!( + a, + b, + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] + ), + 2 => simd_shuffle!( + a, + b, + [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17] + ), + 3 => simd_shuffle!( + a, + b, + [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18] + ), + 4 => simd_shuffle!( + a, + b, + [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] + ), + 5 => simd_shuffle!( + a, + b, + [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20] + ), + 6 => simd_shuffle!( + a, + b, + [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21] + ), + 7 => simd_shuffle!( + a, + b, + [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22] + ), + 8 => simd_shuffle!( + a, + b, + [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] + ), + 9 => simd_shuffle!( + a, + b, + [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] + ), + 10 => simd_shuffle!( + a, + b, + [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25] + ), + 11 => simd_shuffle!( + a, + b, + [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26] + ), + 12 => simd_shuffle!( + a, + b, + [12, 13, 
14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27] + ), + 13 => simd_shuffle!( + a, + b, + [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] + ), + 14 => simd_shuffle!( + a, + b, + [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29] + ), + 15 => simd_shuffle!( + a, + b, + [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30] + ), + _ => unreachable_unchecked(), + } + } +} +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 15) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vextq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(N, 4); + unsafe { + match N & 0b1111 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), + 1 => simd_shuffle!( + a, + b, + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] + ), + 2 => simd_shuffle!( + a, + b, + [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17] + ), + 3 => simd_shuffle!( + a, + b, + [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18] + ), + 4 => simd_shuffle!( + a, + b, + [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] + ), + 5 => simd_shuffle!( + a, + b, + [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20] + ), + 6 => simd_shuffle!( + a, + b, + [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21] + ), + 7 => simd_shuffle!( + a, + b, + [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 
17, 18, 19, 20, 21, 22] + ), + 8 => simd_shuffle!( + a, + b, + [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] + ), + 9 => simd_shuffle!( + a, + b, + [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] + ), + 10 => simd_shuffle!( + a, + b, + [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25] + ), + 11 => simd_shuffle!( + a, + b, + [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26] + ), + 12 => simd_shuffle!( + a, + b, + [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27] + ), + 13 => simd_shuffle!( + a, + b, + [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] + ), + 14 => simd_shuffle!( + a, + b, + [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29] + ), + 15 => simd_shuffle!( + a, + b, + [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30] + ), + _ => unreachable_unchecked(), + } + } +} +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 15) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vextq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + static_assert_uimm_bits!(N, 4); + unsafe { + match N & 0b1111 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), + 1 => simd_shuffle!( + a, + b, + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] + ), + 2 => simd_shuffle!( + a, + b, + [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 
14, 15, 16, 17] + ), + 3 => simd_shuffle!( + a, + b, + [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18] + ), + 4 => simd_shuffle!( + a, + b, + [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] + ), + 5 => simd_shuffle!( + a, + b, + [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20] + ), + 6 => simd_shuffle!( + a, + b, + [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21] + ), + 7 => simd_shuffle!( + a, + b, + [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22] + ), + 8 => simd_shuffle!( + a, + b, + [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] + ), + 9 => simd_shuffle!( + a, + b, + [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] + ), + 10 => simd_shuffle!( + a, + b, + [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25] + ), + 11 => simd_shuffle!( + a, + b, + [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26] + ), + 12 => simd_shuffle!( + a, + b, + [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27] + ), + 13 => simd_shuffle!( + a, + b, + [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] + ), + 14 => simd_shuffle!( + a, + b, + [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29] + ), + 15 => simd_shuffle!( + a, + b, + [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30] + ), + _ => unreachable_unchecked(), + } + } +} +#[doc = "Floating-point fused Multiply-Add to accumulator (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmla) +)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vfma_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> 
float16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.v4f16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.fma.v4f16")] + fn _vfma_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t; + } + unsafe { _vfma_f16(b, c, a) } +} +#[doc = "Floating-point fused Multiply-Add to accumulator (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmla) +)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vfmaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.v8f16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.fma.v8f16")] + fn _vfmaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t; + } + unsafe { _vfmaq_f16(b, c, a) } +} +#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vfma_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { + unsafe extern "unadjusted" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.v2f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.fma.v2f32")] + fn _vfma_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; + } + unsafe { _vfma_f32(b, c, a) } +} +#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vfmaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.v4f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.fma.v4f32")] + fn _vfmaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; + } + unsafe { _vfmaq_f32(b, c, a) } +} +#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vfma_n_f32(a: 
float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { + vfma_f32(a, b, vdup_n_f32_vfp4(c)) +} +#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vfmaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { + vfmaq_f32(a, b, vdupq_n_f32_vfp4(c)) +} +#[doc = "Floating-point fused multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmls) +)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vfms_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t { + unsafe { + let b: float16x4_t = simd_neg(b); + vfma_f16(a, b, c) + } +} +#[doc = "Floating-point fused multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmls) +)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vfmsq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t { + unsafe { + let b: float16x8_t = simd_neg(b); + vfmaq_f16(a, b, c) + } +} +#[doc = "Floating-point fused multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vfms_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { + unsafe { + let b: float32x2_t = simd_neg(b); + vfma_f32(a, b, c) + } +} +#[doc = "Floating-point fused multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vfmsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { 
+ unsafe { + let b: float32x4_t = simd_neg(b); + vfmaq_f32(a, b, c) + } +} +#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vfms_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { + vfms_f32(a, b, vdup_n_f32_vfp4(c)) +} +#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vfmsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { + vfmsq_f32(a, b, vdupq_n_f32_vfp4(c)) +} +#[doc = "Duplicate vector element to vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = 
"stdarch_neon_f16", issue = "136306")] +#[cfg_attr(test, assert_instr(nop))] +pub fn vget_high_f16(a: float16x8_t) -> float16x4_t { + unsafe { simd_shuffle!(a, a, [4, 5, 6, 7]) } +} +#[doc = "Duplicate vector element to vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg_attr(test, assert_instr(nop))] +pub fn vget_low_f16(a: float16x8_t) -> float16x4_t { + unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) } +} +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_f32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vget_high_f32(a: float32x4_t) -> float32x2_t { + unsafe { simd_shuffle!(a, a, [2, 3]) } +} +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_p16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] 
+#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vget_high_p16(a: poly16x8_t) -> poly16x4_t { + unsafe { simd_shuffle!(a, a, [4, 5, 6, 7]) } +} +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_p8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vget_high_p8(a: poly8x16_t) -> poly8x8_t { + unsafe { simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]) } +} +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_s16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vget_high_s16(a: int16x8_t) -> int16x4_t { + unsafe { simd_shuffle!(a, a, [4, 5, 6, 7]) } +} +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_s32)"] +#[inline] +#[target_feature(enable = "neon")] 
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vget_high_s32(a: int32x4_t) -> int32x2_t { + unsafe { simd_shuffle!(a, a, [2, 3]) } +} +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_s8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vget_high_s8(a: int8x16_t) -> int8x8_t { + unsafe { simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]) } +} +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_u16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] 
+pub fn vget_high_u16(a: uint16x8_t) -> uint16x4_t { + unsafe { simd_shuffle!(a, a, [4, 5, 6, 7]) } +} +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_u32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vget_high_u32(a: uint32x4_t) -> uint32x2_t { + unsafe { simd_shuffle!(a, a, [2, 3]) } +} +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_u8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vget_high_u8(a: uint8x16_t) -> uint8x8_t { + unsafe { simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]) } +} +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_s64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vmov"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vget_high_s64(a: int64x2_t) -> int64x1_t { + unsafe { int64x1_t([simd_extract!(a, 1)]) } +} +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_u64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vget_high_u64(a: uint64x2_t) -> uint64x1_t { + unsafe { uint64x1_t([simd_extract!(a, 1)]) } +} +#[doc = "Duplicate vector element to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,fp16")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vget_lane_f16(a: float16x4_t) -> f16 { + static_assert_uimm_bits!(LANE, 2); + unsafe { simd_extract!(a, LANE as u32) } +} +#[doc = "Duplicate vector element to scalar"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,fp16")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vgetq_lane_f16(a: float16x8_t) -> f16 { + static_assert_uimm_bits!(LANE, 3); + unsafe { simd_extract!(a, LANE as u32) } +} +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_f32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vget_lane_f32(v: float32x2_t) -> f32 { + static_assert_uimm_bits!(IMM5, 1); + unsafe { simd_extract!(v, IMM5 as u32) } +} +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_p16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vget_lane_p16(v: poly16x4_t) -> p16 { + 
static_assert_uimm_bits!(IMM5, 2); + unsafe { simd_extract!(v, IMM5 as u32) } +} +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_p8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vget_lane_p8(v: poly8x8_t) -> p8 { + static_assert_uimm_bits!(IMM5, 3); + unsafe { simd_extract!(v, IMM5 as u32) } +} +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_s16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vget_lane_s16(v: int16x4_t) -> i16 { + static_assert_uimm_bits!(IMM5, 2); + unsafe { simd_extract!(v, IMM5 as u32) } +} +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_s32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch 
= "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vget_lane_s32(v: int32x2_t) -> i32 { + static_assert_uimm_bits!(IMM5, 1); + unsafe { simd_extract!(v, IMM5 as u32) } +} +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_s8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vget_lane_s8(v: int8x8_t) -> i8 { + static_assert_uimm_bits!(IMM5, 3); + unsafe { simd_extract!(v, IMM5 as u32) } +} +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_u16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vget_lane_u16(v: uint16x4_t) -> u16 { + static_assert_uimm_bits!(IMM5, 2); + unsafe { simd_extract!(v, IMM5 as u32) } +} +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_u32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 
1))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vget_lane_u32(v: uint32x2_t) -> u32 { + static_assert_uimm_bits!(IMM5, 1); + unsafe { simd_extract!(v, IMM5 as u32) } +} +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_u8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vget_lane_u8(v: uint8x8_t) -> u8 { + static_assert_uimm_bits!(IMM5, 3); + unsafe { simd_extract!(v, IMM5 as u32) } +} +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_f32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vgetq_lane_f32(v: float32x4_t) -> f32 { + static_assert_uimm_bits!(IMM5, 2); + unsafe { simd_extract!(v, IMM5 as u32) } +} +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_p16)"] +#[inline] +#[target_feature(enable = "neon")] 
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vgetq_lane_p16(v: poly16x8_t) -> p16 { + static_assert_uimm_bits!(IMM5, 3); + unsafe { simd_extract!(v, IMM5 as u32) } +} +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_p64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vgetq_lane_p64(v: poly64x2_t) -> p64 { + static_assert_uimm_bits!(IMM5, 1); + unsafe { simd_extract!(v, IMM5 as u32) } +} +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_p8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vgetq_lane_p8(v: poly8x16_t) -> p8 { + static_assert_uimm_bits!(IMM5, 4); + unsafe { simd_extract!(v, IMM5 as u32) } +} +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_s16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vgetq_lane_s16(v: int16x8_t) -> i16 { + static_assert_uimm_bits!(IMM5, 3); + unsafe { simd_extract!(v, IMM5 as u32) } +} +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_s32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vgetq_lane_s32(v: int32x4_t) -> i32 { + static_assert_uimm_bits!(IMM5, 2); + unsafe { simd_extract!(v, IMM5 as u32) } +} +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_s64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vgetq_lane_s64(v: int64x2_t) -> i64 { + 
static_assert_uimm_bits!(IMM5, 1); + unsafe { simd_extract!(v, IMM5 as u32) } +} +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_s8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vgetq_lane_s8(v: int8x16_t) -> i8 { + static_assert_uimm_bits!(IMM5, 4); + unsafe { simd_extract!(v, IMM5 as u32) } +} +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_u16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vgetq_lane_u16(v: uint16x8_t) -> u16 { + static_assert_uimm_bits!(IMM5, 3); + unsafe { simd_extract!(v, IMM5 as u32) } +} +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_u32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + 
target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vgetq_lane_u32(v: uint32x4_t) -> u32 { + static_assert_uimm_bits!(IMM5, 2); + unsafe { simd_extract!(v, IMM5 as u32) } +} +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_u64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vgetq_lane_u64(v: uint64x2_t) -> u64 { + static_assert_uimm_bits!(IMM5, 2); + unsafe { simd_extract!(v, IMM5 as u32) } +} +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_u8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vgetq_lane_u8(v: uint8x16_t) -> u8 { + static_assert_uimm_bits!(IMM5, 4); + unsafe { simd_extract!(v, IMM5 as u32) } +} +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_p64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, 
assert_instr(nop, IMM5 = 0))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vget_lane_p64(v: poly64x1_t) -> p64 { + static_assert!(IMM5 == 0); + unsafe { simd_extract!(v, IMM5 as u32) } +} +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_s64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 0))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vget_lane_s64(v: int64x1_t) -> i64 { + static_assert!(IMM5 == 0); + unsafe { simd_extract!(v, IMM5 as u32) } +} +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_u64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 0))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vget_lane_u64(v: uint64x1_t) -> u64 { + static_assert!(IMM5 == 0); + unsafe { simd_extract!(v, 0) } +} +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_f32)"] +#[inline] +#[target_feature(enable = "neon")] 
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vget_low_f32(a: float32x4_t) -> float32x2_t { + unsafe { simd_shuffle!(a, a, [0, 1]) } +} +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_p16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vget_low_p16(a: poly16x8_t) -> poly16x4_t { + unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) } +} +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_p8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vget_low_p8(a: poly8x16_t) -> poly8x8_t { + unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) } +} +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_s16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(test, 
assert_instr(nop))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vget_low_s16(a: int16x8_t) -> int16x4_t { + unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) } +} +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_s32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vget_low_s32(a: int32x4_t) -> int32x2_t { + unsafe { simd_shuffle!(a, a, [0, 1]) } +} +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_s8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vget_low_s8(a: int8x16_t) -> int8x8_t { + unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) } +} +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_u16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = 
"neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vget_low_u16(a: uint16x8_t) -> uint16x4_t { + unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) } +} +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_u32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vget_low_u32(a: uint32x4_t) -> uint32x2_t { + unsafe { simd_shuffle!(a, a, [0, 1]) } +} +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_u8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vget_low_u8(a: uint8x16_t) -> uint8x8_t { + unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) } +} +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_s64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + 
unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vget_low_s64(a: int64x2_t) -> int64x1_t { + unsafe { int64x1_t([simd_extract!(a, 0)]) } +} +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_u64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vget_low_u64(a: uint64x2_t) -> uint64x1_t { + unsafe { uint64x1_t([simd_extract!(a, 0)]) } +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shadd.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v8i8")] + fn _vhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + unsafe { _vhadd_s8(a, b) } +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s8)"] +#[inline] +#[target_feature(enable = 
"neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shadd.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v16i8")] + fn _vhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + unsafe { _vhaddq_s8(a, b) } +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shadd.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v4i16")] + fn _vhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + unsafe { _vhadd_s16(a, b) } +} +#[doc = "Halving add"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shadd.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v8i16")] + fn _vhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + unsafe { _vhaddq_s16(a, b) } +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shadd.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v2i32")] + fn _vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + unsafe { 
_vhadd_s32(a, b) } +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shadd.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v4i32")] + fn _vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + unsafe { _vhaddq_s32(a, b) } +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhadd.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v8i8")] + fn _vhadd_u8(a: 
uint8x8_t, b: uint8x8_t) -> uint8x8_t; + } + unsafe { _vhadd_u8(a, b) } +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhadd.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v16i8")] + fn _vhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; + } + unsafe { _vhaddq_u8(a, b) } +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhadd.v4i16" + )] + #[cfg_attr(target_arch = "arm", 
link_name = "llvm.arm.neon.vhaddu.v4i16")] + fn _vhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; + } + unsafe { _vhadd_u16(a, b) } +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhadd.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v8i16")] + fn _vhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; + } + unsafe { _vhaddq_u16(a, b) } +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = 
"llvm.aarch64.neon.uhadd.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v2i32")] + fn _vhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; + } + unsafe { _vhadd_u32(a, b) } +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhadd.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v4i32")] + fn _vhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; + } + unsafe { _vhaddq_u32(a, b) } +} +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe extern "unadjusted" { + 
#[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shsub.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v4i16")] + fn _vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + unsafe { _vhsub_s16(a, b) } +} +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shsub.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v8i16")] + fn _vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + unsafe { _vhsubq_s16(a, b) } +} +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn 
vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shsub.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v2i32")] + fn _vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + unsafe { _vhsub_s32(a, b) } +} +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shsub.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v4i32")] + fn _vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + unsafe { _vhsubq_s32(a, b) } +} +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + 
unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shsub.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v8i8")] + fn _vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + unsafe { _vhsub_s8(a, b) } +} +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shsub.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v16i8")] + fn _vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + unsafe { _vhsubq_s8(a, b) } +} +#[doc = "Unsigned halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uhsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", 
since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vhsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhsub.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v8i8")] + fn _vhsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; + } + unsafe { _vhsub_u8(a, b) } +} +#[doc = "Unsigned halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uhsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vhsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhsub.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v16i8")] + fn _vhsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; + } + unsafe { _vhsubq_u8(a, b) } +} +#[doc = "Unsigned halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uhsub) +)] 
+#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vhsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhsub.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v4i16")] + fn _vhsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; + } + unsafe { _vhsub_u16(a, b) } +} +#[doc = "Unsigned halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uhsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vhsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhsub.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v8i16")] + fn _vhsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; + } + unsafe { _vhsubq_u16(a, b) } +} +#[doc = "Unsigned halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u32"))] +#[cfg_attr( + 
all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uhsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vhsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhsub.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v2i32")] + fn _vhsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; + } + unsafe { _vhsub_u32(a, b) } +} +#[doc = "Unsigned halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uhsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vhsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhsub.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v4i32")] + fn _vhsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; + } + unsafe { _vhsubq_u32(a, b) } +} +#[doc = "Load one single-element structure and replicate to all lanes of one register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_f16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic 
unsafe"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1r) +)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub unsafe fn vld1_dup_f16(ptr: *const f16) -> float16x4_t { + let x: float16x4_t = vld1_lane_f16::<0>(ptr, transmute(f16x4::splat(0.0))); + simd_shuffle!(x, x, [0, 0, 0, 0]) +} +#[doc = "Load one single-element structure and replicate to all lanes of one register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_f16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1r) +)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub unsafe fn vld1q_dup_f16(ptr: *const f16) -> float16x8_t { + let x: float16x8_t = vld1q_lane_f16::<0>(ptr, transmute(f16x8::splat(0.0))); + simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) +} +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") 
+)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_dup_f32(ptr: *const f32) -> float32x2_t { + let x = vld1_lane_f32::<0>(ptr, transmute(f32x2::splat(0.0))); + simd_shuffle!(x, x, [0, 0]) +} +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_dup_p16(ptr: *const p16) -> poly16x4_t { + let x = vld1_lane_p16::<0>(ptr, transmute(u16x4::splat(0))); + simd_shuffle!(x, x, [0, 0, 0, 0]) +} +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn 
vld1_dup_p8(ptr: *const p8) -> poly8x8_t { + let x = vld1_lane_p8::<0>(ptr, transmute(u8x8::splat(0))); + simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) +} +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_dup_s16(ptr: *const i16) -> int16x4_t { + let x = vld1_lane_s16::<0>(ptr, transmute(i16x4::splat(0))); + simd_shuffle!(x, x, [0, 0, 0, 0]) +} +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_dup_s32(ptr: *const i32) -> int32x2_t { + let x = vld1_lane_s32::<0>(ptr, transmute(i32x2::splat(0))); + simd_shuffle!(x, x, [0, 
0]) +} +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_dup_s8(ptr: *const i8) -> int8x8_t { + let x = vld1_lane_s8::<0>(ptr, transmute(i8x8::splat(0))); + simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) +} +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_dup_u16(ptr: *const u16) -> uint16x4_t { + let x = vld1_lane_u16::<0>(ptr, transmute(u16x4::splat(0))); + simd_shuffle!(x, x, [0, 0, 0, 0]) +} +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_dup_u32(ptr: *const u32) -> uint32x2_t { + let x = vld1_lane_u32::<0>(ptr, transmute(u32x2::splat(0))); + simd_shuffle!(x, x, [0, 0]) +} +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_dup_u8(ptr: *const u8) -> uint8x8_t { + let x = vld1_lane_u8::<0>(ptr, transmute(u8x8::splat(0))); + simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) +} +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon 
instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_dup_f32(ptr: *const f32) -> float32x4_t { + let x = vld1q_lane_f32::<0>(ptr, transmute(f32x4::splat(0.0))); + simd_shuffle!(x, x, [0, 0, 0, 0]) +} +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_dup_p16(ptr: *const p16) -> poly16x8_t { + let x = vld1q_lane_p16::<0>(ptr, transmute(u16x8::splat(0))); + simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) +} +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_dup_p8(ptr: *const p8) -> poly8x16_t { + let x = vld1q_lane_p8::<0>(ptr, transmute(u8x16::splat(0))); + simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) +} +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_dup_s16(ptr: *const i16) -> int16x8_t { + let x = vld1q_lane_s16::<0>(ptr, transmute(i16x8::splat(0))); + simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) +} +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vld1.32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_dup_s32(ptr: *const i32) -> int32x4_t { + let x = vld1q_lane_s32::<0>(ptr, transmute(i32x4::splat(0))); + simd_shuffle!(x, x, [0, 0, 0, 0]) +} +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_dup_s64(ptr: *const i64) -> int64x2_t { + let x = vld1q_lane_s64::<0>(ptr, transmute(i64x2::splat(0))); + simd_shuffle!(x, x, [0, 0]) +} +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1r) +)] +#[cfg_attr( + 
not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_dup_s8(ptr: *const i8) -> int8x16_t { + let x = vld1q_lane_s8::<0>(ptr, transmute(i8x16::splat(0))); + simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) +} +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_dup_u16(ptr: *const u16) -> uint16x8_t { + let x = vld1q_lane_u16::<0>(ptr, transmute(u16x8::splat(0))); + simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) +} +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] 
+#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_dup_u32(ptr: *const u32) -> uint32x4_t { + let x = vld1q_lane_u32::<0>(ptr, transmute(u32x4::splat(0))); + simd_shuffle!(x, x, [0, 0, 0, 0]) +} +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_dup_u64(ptr: *const u64) -> uint64x2_t { + let x = vld1q_lane_u64::<0>(ptr, transmute(u64x2::splat(0))); + simd_shuffle!(x, x, [0, 0]) +} +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_dup_u8(ptr: 
*const u8) -> uint8x16_t { + let x = vld1q_lane_u8::<0>(ptr, transmute(u8x16::splat(0))); + simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) +} +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ldr) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_dup_p64(ptr: *const p64) -> poly64x1_t { + let x: poly64x1_t; + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = crate::core_arch::aarch64::vld1_p64(ptr); + } + #[cfg(target_arch = "arm")] + { + x = crate::core_arch::arm::vld1_p64(ptr); + }; + x +} +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ldr) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn 
vld1_dup_s64(ptr: *const i64) -> int64x1_t { + let x: int64x1_t; + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = crate::core_arch::aarch64::vld1_s64(ptr); + } + #[cfg(target_arch = "arm")] + { + x = crate::core_arch::arm::vld1_s64(ptr); + }; + x +} +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ldr) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_dup_u64(ptr: *const u64) -> uint64x1_t { + let x: uint64x1_t; + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = crate::core_arch::aarch64::vld1_u64(ptr); + } + #[cfg(target_arch = "arm")] + { + x = crate::core_arch::arm::vld1_u64(ptr); + }; + x +} +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +pub unsafe fn vld1_f16(ptr: *const f16) -> float16x4_t { + transmute(vld1_v4f16( + ptr as *const i8, + 
crate::mem::align_of::() as i32, + )) +} +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +pub unsafe fn vld1_f16(ptr: *const f16) -> float16x4_t { + let ret_val: float16x4_t = transmute(vld1_v4f16( + ptr as *const i8, + crate::mem::align_of::() as i32, + )); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +pub unsafe fn vld1q_f16(ptr: *const f16) -> float16x8_t { + transmute(vld1q_v8f16( + ptr as *const i8, + crate::mem::align_of::() as i32, + )) +} +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] 
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +pub unsafe fn vld1q_f16(ptr: *const f16) -> float16x8_t { + let ret_val: float16x8_t = transmute(vld1q_v8f16( + ptr as *const i8, + crate::mem::align_of::() as i32, + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f16_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1) +)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub unsafe fn vld1_f16_x2(a: *const f16) -> float16x4x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x2.v4f16.p0f16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v4f16.p0f16")] + fn _vld1_f16_x2(a: *const f16) -> float16x4x2_t; + } + _vld1_f16_x2(a) +} +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f16_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1) +)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub unsafe fn vld1_f16_x3(a: *const f16) -> float16x4x3_t { + unsafe extern "unadjusted" 
{ + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x3.v4f16.p0f16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v4f16.p0f16")] + fn _vld1_f16_x3(a: *const f16) -> float16x4x3_t; + } + _vld1_f16_x3(a) +} +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f16_x4)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1) +)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub unsafe fn vld1_f16_x4(a: *const f16) -> float16x4x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x4.v4f16.p0f16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v4f16.p0f16")] + fn _vld1_f16_x4(a: *const f16) -> float16x4x4_t; + } + _vld1_f16_x4(a) +} +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f16_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1) +)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub unsafe fn vld1q_f16_x2(a: *const f16) -> float16x8x2_t { + unsafe extern "unadjusted" { + 
#[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x2.v8f16.p0f16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v8f16.p0f16")] + fn _vld1q_f16_x2(a: *const f16) -> float16x8x2_t; + } + _vld1q_f16_x2(a) +} +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f16_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1) +)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub unsafe fn vld1q_f16_x3(a: *const f16) -> float16x8x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x3.v8f16.p0f16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v8f16.p0f16")] + fn _vld1q_f16_x3(a: *const f16) -> float16x8x3_t; + } + _vld1q_f16_x3(a) +} +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f16_x4)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1) +)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub unsafe fn vld1q_f16_x4(a: *const f16) -> float16x8x4_t { + unsafe extern "unadjusted" { + 
#[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x4.v8f16.p0f16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v8f16.p0f16")] + fn _vld1q_f16_x4(a: *const f16) -> float16x8x4_t; + } + _vld1q_f16_x4(a) +} +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] +pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t { + transmute(vld1_v2f32( + ptr as *const i8, + crate::mem::align_of::() as i32, + )) +} +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t { let ret_val: float32x2_t = transmute(vld1_v2f32( ptr as *const i8, @@ -11416,6 +15884,606 @@ pub unsafe fn vld1q_lane_f16(ptr: *const f16, src: float16x8_t) static_assert_uimm_bits!(LANE, 3); simd_insert!(src, LANE as u32, *ptr) } +#[doc = "Load one single-element structure to one lane of one register."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] 
+#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1, LANE = 1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_lane_f32(ptr: *const f32, src: float32x2_t) -> float32x2_t { + static_assert_uimm_bits!(LANE, 1); + simd_insert!(src, LANE as u32, *ptr) +} +#[doc = "Load one single-element structure to one lane of one register."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1, LANE = 3) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_lane_p16(ptr: *const p16, src: poly16x4_t) -> poly16x4_t { + static_assert_uimm_bits!(LANE, 2); + simd_insert!(src, LANE as u32, *ptr) +} +#[doc = "Load one single-element structure to one lane of one register."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = 
"arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 7))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1, LANE = 7) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_lane_p8(ptr: *const p8, src: poly8x8_t) -> poly8x8_t { + static_assert_uimm_bits!(LANE, 3); + simd_insert!(src, LANE as u32, *ptr) +} +#[doc = "Load one single-element structure to one lane of one register."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1, LANE = 3) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_lane_s16(ptr: *const i16, src: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(LANE, 2); + simd_insert!(src, LANE as u32, *ptr) +} +#[doc = "Load one single-element structure to one lane of one register."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(2)] 
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1, LANE = 1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_lane_s32(ptr: *const i32, src: int32x2_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + simd_insert!(src, LANE as u32, *ptr) +} +#[doc = "Load one single-element structure to one lane of one register."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ldr, LANE = 0) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_lane_s64(ptr: *const i64, src: int64x1_t) -> int64x1_t { + static_assert!(LANE == 0); + simd_insert!(src, LANE as u32, *ptr) +} +#[doc = "Load one single-element structure to one lane of one register."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 7))] +#[cfg_attr( 
+ all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1, LANE = 7) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_lane_s8(ptr: *const i8, src: int8x8_t) -> int8x8_t { + static_assert_uimm_bits!(LANE, 3); + simd_insert!(src, LANE as u32, *ptr) +} +#[doc = "Load one single-element structure to one lane of one register."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1, LANE = 3) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_lane_u16(ptr: *const u16, src: uint16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(LANE, 2); + simd_insert!(src, LANE as u32, *ptr) +} +#[doc = "Load one single-element structure to one lane of one register."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + 
assert_instr(ld1, LANE = 1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_lane_u32(ptr: *const u32, src: uint32x2_t) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 1); + simd_insert!(src, LANE as u32, *ptr) +} +#[doc = "Load one single-element structure to one lane of one register."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ldr, LANE = 0) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_lane_u64(ptr: *const u64, src: uint64x1_t) -> uint64x1_t { + static_assert!(LANE == 0); + simd_insert!(src, LANE as u32, *ptr) +} +#[doc = "Load one single-element structure to one lane of one register."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 7))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1, LANE = 7) +)] +#[cfg_attr( + not(target_arch = "arm"), + 
stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_lane_u8(ptr: *const u8, src: uint8x8_t) -> uint8x8_t { + static_assert_uimm_bits!(LANE, 3); + simd_insert!(src, LANE as u32, *ptr) +} +#[doc = "Load one single-element structure to one lane of one register."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1, LANE = 3) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_lane_f32(ptr: *const f32, src: float32x4_t) -> float32x4_t { + static_assert_uimm_bits!(LANE, 2); + simd_insert!(src, LANE as u32, *ptr) +} +#[doc = "Load one single-element structure to one lane of one register."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 7))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1, LANE = 7) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + 
target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_lane_p16(ptr: *const p16, src: poly16x8_t) -> poly16x8_t { + static_assert_uimm_bits!(LANE, 3); + simd_insert!(src, LANE as u32, *ptr) +} +#[doc = "Load one single-element structure to one lane of one register."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 15))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1, LANE = 15) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_lane_p8(ptr: *const p8, src: poly8x16_t) -> poly8x16_t { + static_assert_uimm_bits!(LANE, 4); + simd_insert!(src, LANE as u32, *ptr) +} +#[doc = "Load one single-element structure to one lane of one register."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 7))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1, LANE = 7) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_lane_s16(ptr: *const i16, src: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(LANE, 3); + simd_insert!(src, LANE as u32, *ptr) +} +#[doc = "Load one single-element structure to one lane of one register."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1, LANE = 3) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_lane_s32(ptr: *const i32, src: int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + simd_insert!(src, LANE as u32, *ptr) +} +#[doc = "Load one single-element structure to one lane of one register."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1, LANE = 1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn 
vld1q_lane_s64(ptr: *const i64, src: int64x2_t) -> int64x2_t { + static_assert_uimm_bits!(LANE, 1); + simd_insert!(src, LANE as u32, *ptr) +} +#[doc = "Load one single-element structure to one lane of one register."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 15))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1, LANE = 15) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_lane_s8(ptr: *const i8, src: int8x16_t) -> int8x16_t { + static_assert_uimm_bits!(LANE, 4); + simd_insert!(src, LANE as u32, *ptr) +} +#[doc = "Load one single-element structure to one lane of one register."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 7))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1, LANE = 7) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_lane_u16(ptr: *const u16, src: uint16x8_t) -> uint16x8_t { + 
static_assert_uimm_bits!(LANE, 3); + simd_insert!(src, LANE as u32, *ptr) +} +#[doc = "Load one single-element structure to one lane of one register."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1, LANE = 3) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_lane_u32(ptr: *const u32, src: uint32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + simd_insert!(src, LANE as u32, *ptr) +} +#[doc = "Load one single-element structure to one lane of one register."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1, LANE = 1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_lane_u64(ptr: *const u64, src: uint64x2_t) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 1); + simd_insert!(src, LANE as u32, *ptr) 
+} +#[doc = "Load one single-element structure to one lane of one register."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 15))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1, LANE = 15) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_lane_u8(ptr: *const u8, src: uint8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(LANE, 4); + simd_insert!(src, LANE as u32, *ptr) +} +#[doc = "Load one single-element structure to one lane of one register."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ldr, LANE = 0) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_lane_p64(ptr: *const p64, src: poly64x1_t) -> poly64x1_t { + static_assert!(LANE == 0); + simd_insert!(src, LANE as u32, *ptr) +} +#[doc = "Load one single-element structure to one lane of one register."] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1, LANE = 1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_lane_p64(ptr: *const p64, src: poly64x2_t) -> poly64x2_t { + static_assert_uimm_bits!(LANE, 1); + simd_insert!(src, LANE as u32, *ptr) +} #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64)"] #[doc = "## Safety"] @@ -14576,6 +19644,30 @@ unsafe fn vld1q_v8f16(a: *const i8, b: i32) -> float16x8_t { } _vld1q_v8f16(a, b) } +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe 
fn vld1q_dup_p64(ptr: *const p64) -> poly64x2_t { + let x = vld1q_lane_p64::<0>(ptr, transmute(u64x2::splat(0))); + simd_shuffle!(x, x, [0, 0]) +} #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f16)"] #[doc = "## Safety"] @@ -18362,598 +23454,1233 @@ pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { } _vld3q_s32(a as _) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v2f32.p0")] + fn _vld3_f32(ptr: *const i8, size: i32) -> float32x2x3_t; + } + _vld3_f32(a as *const i8, 4) +} +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4f32.p0")] + fn _vld3q_f32(ptr: *const i8, size: i32) -> 
float32x4x3_t; + } + _vld3q_f32(a as *const i8, 4) +} +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v8i8.p0")] + fn _vld3_s8(ptr: *const i8, size: i32) -> int8x8x3_t; + } + _vld3_s8(a as *const i8, 1) +} +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v16i8.p0")] + fn _vld3q_s8(ptr: *const i8, size: i32) -> int8x16x3_t; + } + _vld3q_s8(a as *const i8, 1) +} +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = 
"llvm.arm.neon.vld3.v4i16.p0")] + fn _vld3_s16(ptr: *const i8, size: i32) -> int16x4x3_t; + } + _vld3_s16(a as *const i8, 2) +} +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v8i16.p0")] + fn _vld3q_s16(ptr: *const i8, size: i32) -> int16x8x3_t; + } + _vld3q_s16(a as *const i8, 2) +} +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v2i32.p0")] + fn _vld3_s32(ptr: *const i8, size: i32) -> int32x2x3_t; + } + _vld3_s32(a as *const i8, 4) +} +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { + 
unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4i32.p0")] + fn _vld3q_s32(ptr: *const i8, size: i32) -> int32x4x3_t; + } + _vld3q_s32(a as *const i8, 4) +} +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub unsafe fn vld3_lane_f16(a: *const f16, b: float16x4x3_t) -> float16x4x3_t { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4f16.p0")] + fn _vld3_lane_f16( + ptr: *const f16, + a: float16x4_t, + b: float16x4_t, + c: float16x4_t, + n: i32, + size: i32, + ) -> float16x4x3_t; + } + _vld3_lane_f16(a as _, b.0, b.1, b.2, LANE, 2) +} +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub unsafe fn vld3q_lane_f16(a: *const f16, b: float16x8x3_t) -> float16x8x3_t { + static_assert_uimm_bits!(LANE, 3); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8f16.p0")] + fn _vld3q_lane_f16( + ptr: *const f16, + a: float16x8_t, + b: 
float16x8_t, + c: float16x8_t, + n: i32, + size: i32, + ) -> float16x8x3_t; + } + _vld3q_lane_f16(a as _, b.0, b.1, b.2, LANE, 2) +} +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld3, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub unsafe fn vld3_lane_f16(a: *const f16, b: float16x4x3_t) -> float16x4x3_t { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v4f16.p0" + )] + fn _vld3_lane_f16( + a: float16x4_t, + b: float16x4_t, + c: float16x4_t, + n: i64, + ptr: *const f16, + ) -> float16x4x3_t; + } + _vld3_lane_f16(b.0, b.1, b.2, LANE as i64, a as _) +} +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld3, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub unsafe fn vld3q_lane_f16(a: *const f16, b: float16x8x3_t) -> float16x8x3_t { + static_assert_uimm_bits!(LANE, 3); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v8f16.p0" + )] + fn _vld3q_lane_f16( + a: float16x8_t, + b: 
float16x8_t, + c: float16x8_t, + n: i64, + ptr: *const f16, + ) -> float16x8x3_t; + } + _vld3q_lane_f16(b.0, b.1, b.2, LANE as i64, a as _) +} +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> float32x2x3_t { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v2f32.p0" + )] + fn _vld3_lane_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + n: i64, + ptr: *const i8, + ) -> float32x2x3_t; + } + _vld3_lane_f32(b.0, b.1, b.2, LANE as i64, a as _) +} +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) -> float32x4x3_t { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v4f32.p0" + )] + fn _vld3q_lane_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + n: i64, + ptr: *const i8, + ) -> float32x4x3_t; + } + _vld3q_lane_f32(b.0, b.1, b.2, LANE as i64, a as _) +} 
+#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> float32x2x3_t { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2f32.p0")] + fn _vld3_lane_f32( + ptr: *const i8, + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + n: i32, + size: i32, + ) -> float32x2x3_t; + } + _vld3_lane_f32(a as _, b.0, b.1, b.2, LANE, 4) +} +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8x8x3_t { + static_assert_uimm_bits!(LANE, 3); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v8i8.p0" + )] + fn _vld3_lane_s8( + a: int8x8_t, + b: int8x8_t, + c: int8x8_t, + n: i64, + ptr: *const i8, + ) -> int8x8x3_t; + } + _vld3_lane_s8(b.0, b.1, b.2, LANE as i64, a as _) +} +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s16)"] +#[doc = "## 
Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> int16x4x3_t { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v4i16.p0" + )] + fn _vld3_lane_s16( + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, + n: i64, + ptr: *const i8, + ) -> int16x4x3_t; + } + _vld3_lane_s16(b.0, b.1, b.2, LANE as i64, a as _) +} +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> int16x8x3_t { + static_assert_uimm_bits!(LANE, 4); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v8i16.p0" + )] + fn _vld3q_lane_s16( + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, + n: i64, + ptr: *const i8, + ) -> int16x8x3_t; + } + _vld3q_lane_s16(b.0, b.1, b.2, LANE as i64, a as _) +} +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3, LANE = 
0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> int32x2x3_t { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v2i32.p0" + )] + fn _vld3_lane_s32( + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, + n: i64, + ptr: *const i8, + ) -> int32x2x3_t; + } + _vld3_lane_s32(b.0, b.1, b.2, LANE as i64, a as _) +} +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> int32x4x3_t { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v4i32.p0" + )] + fn _vld3q_lane_s32( + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, + n: i64, + ptr: *const i8, + ) -> int32x4x3_t; + } + _vld3q_lane_s32(b.0, b.1, b.2, LANE as i64, a as _) +} +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3_f32(a: 
*const f32) -> float32x2x3_t { +pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8x8x3_t { + static_assert_uimm_bits!(LANE, 3); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v2f32.p0")] - fn _vld3_f32(ptr: *const i8, size: i32) -> float32x2x3_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i8.p0")] + fn _vld3_lane_s8( + ptr: *const i8, + a: int8x8_t, + b: int8x8_t, + c: int8x8_t, + n: i32, + size: i32, + ) -> int8x8x3_t; } - _vld3_f32(a as *const i8, 4) + _vld3_lane_s8(a as _, b.0, b.1, b.2, LANE, 1) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f32)"] +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { +pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> int16x4x3_t { + static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4f32.p0")] - fn _vld3q_f32(ptr: *const i8, size: i32) -> float32x4x3_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i16.p0")] + fn _vld3_lane_s16( + ptr: *const i8, + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, + n: i32, + size: i32, + ) -> int16x4x3_t; } - _vld3q_f32(a as *const i8, 4) + _vld3_lane_s16(a as _, b.0, b.1, b.2, LANE, 2) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s8)"] +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { +pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> int16x8x3_t { + static_assert_uimm_bits!(LANE, 3); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v8i8.p0")] - fn _vld3_s8(ptr: *const i8, size: i32) -> int8x8x3_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i16.p0")] + fn _vld3q_lane_s16( + ptr: *const i8, + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, + n: i32, + size: i32, + ) -> int16x8x3_t; } - _vld3_s8(a as *const i8, 1) + _vld3q_lane_s16(a as _, b.0, b.1, b.2, LANE, 2) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s8)"] +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { +pub unsafe fn 
vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> int32x2x3_t { + static_assert_uimm_bits!(LANE, 1); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v16i8.p0")] - fn _vld3q_s8(ptr: *const i8, size: i32) -> int8x16x3_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2i32.p0")] + fn _vld3_lane_s32( + ptr: *const i8, + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, + n: i32, + size: i32, + ) -> int32x2x3_t; } - _vld3q_s8(a as *const i8, 1) + _vld3_lane_s32(a as _, b.0, b.1, b.2, LANE, 4) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s16)"] +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { +pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> int32x4x3_t { + static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4i16.p0")] - fn _vld3_s16(ptr: *const i8, size: i32) -> int16x4x3_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i32.p0")] + fn _vld3q_lane_s32( + ptr: *const i8, + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, + n: i32, + size: i32, + ) -> int32x4x3_t; } - _vld3_s16(a as *const i8, 2) + _vld3q_lane_s32(a as _, b.0, b.1, b.2, LANE, 4) } #[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v8i16.p0")] - fn _vld3q_s16(ptr: *const i8, size: i32) -> int16x8x3_t; - } - _vld3q_s16(a as *const i8, 2) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld3, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld3_lane_u8(a: *const u8, b: uint8x8x3_t) -> uint8x8x3_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld3_lane_s8::(transmute(a), transmute(b))) } #[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3_s32(a: 
*const i32) -> int32x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v2i32.p0")] - fn _vld3_s32(ptr: *const i8, size: i32) -> int32x2x3_t; - } - _vld3_s32(a as *const i8, 4) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld3, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld3_lane_u16(a: *const u16, b: uint16x4x3_t) -> uint16x4x3_t { + static_assert_uimm_bits!(LANE, 2); + transmute(vld3_lane_s16::(transmute(a), transmute(b))) } #[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4i32.p0")] - fn _vld3q_s32(ptr: *const i8, size: i32) -> int32x4x3_t; - } - _vld3q_s32(a as *const i8, 4) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = 
"arm64ec")), + assert_instr(ld3, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld3q_lane_u16(a: *const u16, b: uint16x8x3_t) -> uint16x8x3_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld3q_lane_s16::(transmute(a), transmute(b))) } -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f16)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld3, LANE = 0) +)] #[rustc_legacy_const_generics(2)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub unsafe fn vld3_lane_f16(a: *const f16, b: float16x4x3_t) -> float16x4x3_t { +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld3_lane_u32(a: *const u32, b: uint32x2x3_t) -> uint32x2x3_t { + static_assert_uimm_bits!(LANE, 1); + transmute(vld3_lane_s32::(transmute(a), transmute(b))) +} +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld3, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld3q_lane_u32(a: *const u32, b: uint32x4x3_t) -> uint32x4x3_t { static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4f16.p0")] - fn _vld3_lane_f16( - ptr: *const f16, - a: float16x4_t, - b: float16x4_t, - c: float16x4_t, - n: i32, - size: i32, - ) -> float16x4x3_t; - } - _vld3_lane_f16(a as _, b.0, b.1, b.2, LANE, 2) + transmute(vld3q_lane_s32::(transmute(a), transmute(b))) } -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f16)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld3, LANE = 0) +)] 
#[rustc_legacy_const_generics(2)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub unsafe fn vld3q_lane_f16(a: *const f16, b: float16x8x3_t) -> float16x8x3_t { +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld3_lane_p8(a: *const p8, b: poly8x8x3_t) -> poly8x8x3_t { static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8f16.p0")] - fn _vld3q_lane_f16( - ptr: *const f16, - a: float16x8_t, - b: float16x8_t, - c: float16x8_t, - n: i32, - size: i32, - ) -> float16x8x3_t; - } - _vld3q_lane_f16(a as _, b.0, b.1, b.2, LANE, 2) + transmute(vld3_lane_s8::(transmute(a), transmute(b))) } -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f16)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(not(target_arch = "arm"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(ld3, LANE = 0) )] #[rustc_legacy_const_generics(2)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub unsafe fn vld3_lane_f16(a: *const f16, b: float16x4x3_t) -> float16x4x3_t { +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch 
= "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld3_lane_p16(a: *const p16, b: poly16x4x3_t) -> poly16x4x3_t { static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v4f16.p0" - )] - fn _vld3_lane_f16( - a: float16x4_t, - b: float16x4_t, - c: float16x4_t, - n: i64, - ptr: *const f16, - ) -> float16x4x3_t; - } - _vld3_lane_f16(b.0, b.1, b.2, LANE as i64, a as _) + transmute(vld3_lane_s16::(transmute(a), transmute(b))) } -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f16)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(not(target_arch = "arm"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(ld3, LANE = 0) )] #[rustc_legacy_const_generics(2)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub unsafe fn vld3q_lane_f16(a: *const f16, b: float16x8x3_t) -> float16x8x3_t { +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld3q_lane_p16(a: *const p16, b: poly16x8x3_t) -> poly16x8x3_t { static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - 
link_name = "llvm.aarch64.neon.ld3lane.v8f16.p0" - )] - fn _vld3q_lane_f16( - a: float16x8_t, - b: float16x8_t, - c: float16x8_t, - n: i64, - ptr: *const f16, - ) -> float16x8x3_t; - } - _vld3q_lane_f16(b.0, b.1, b.2, LANE as i64, a as _) + transmute(vld3q_lane_s16::(transmute(a), transmute(b))) } #[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld3_p64(a: *const p64) -> poly64x1x3_t { + transmute(vld3_s64(transmute(a))) +} +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> float32x2x3_t { - static_assert_uimm_bits!(LANE, 1); +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vld3_s64(a: *const i64) -> int64x1x3_t { unsafe extern "unadjusted" { #[cfg_attr( 
any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v2f32.p0" + link_name = "llvm.aarch64.neon.ld3.v1i64.p0" )] - fn _vld3_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - n: i64, - ptr: *const i8, - ) -> float32x2x3_t; + fn _vld3_s64(ptr: *const int64x1_t) -> int64x1x3_t; } - _vld3_lane_f32(b.0, b.1, b.2, LANE as i64, a as _) + _vld3_s64(a as _) +} +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vld3_s64(a: *const i64) -> int64x1x3_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v1i64.p0")] + fn _vld3_s64(ptr: *const i8, size: i32) -> int64x1x3_t; + } + _vld3_s64(a as *const i8, 8) +} +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld3_u64(a: *const u64) -> uint64x1x3_t { + transmute(vld3_s64(transmute(a))) +} +#[doc = "Load multiple 3-element structures to three registers"] +#[doc 
= "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld3) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld3_u8(a: *const u8) -> uint8x8x3_t { + transmute(vld3_s8(transmute(a))) +} +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld3) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld3_u8(a: *const u8) -> uint8x8x3_t { + let mut ret_val: uint8x8x3_t = transmute(vld3_s8(transmute(a))); + ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val } #[doc = "Load multiple 3-element structures to three 
registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) -> float32x4x3_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v4f32.p0" - )] - fn _vld3q_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - n: i64, - ptr: *const i8, - ) -> float32x4x3_t; - } - _vld3q_lane_f32(b.0, b.1, b.2, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld3) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld3q_u8(a: *const u8) -> uint8x16x3_t { + transmute(vld3q_s8(transmute(a))) } #[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> float32x2x3_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2f32.p0")] - fn _vld3_lane_f32( - ptr: *const i8, - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - n: i32, - size: i32, - ) -> float32x2x3_t; - } - _vld3_lane_f32(a as _, b.0, b.1, b.2, LANE, 4) -} -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8x8x3_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v8i8.p0" - )] - fn _vld3_lane_s8( - a: int8x8_t, - b: int8x8_t, - c: int8x8_t, - n: i64, - ptr: *const i8, - ) -> int8x8x3_t; - } - _vld3_lane_s8(b.0, b.1, b.2, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld3) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld3q_u8(a: *const u8) -> 
uint8x16x3_t { + let mut ret_val: uint8x16x3_t = transmute(vld3q_s8(transmute(a))); + ret_val.0 = unsafe { + simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + }; + ret_val.1 = unsafe { + simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + }; + ret_val.2 = unsafe { + simd_shuffle!( + ret_val.2, + ret_val.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + }; + ret_val } -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s16)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> int16x4x3_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v4i16.p0" - )] - fn _vld3_lane_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, - n: i64, - ptr: *const i8, - ) -> int16x4x3_t; - } - _vld3_lane_s16(b.0, b.1, b.2, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld3) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature 
= "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld3_u16(a: *const u16) -> uint16x4x3_t { + transmute(vld3_s16(transmute(a))) } -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s16)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> int16x8x3_t { - static_assert_uimm_bits!(LANE, 4); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v8i16.p0" - )] - fn _vld3q_lane_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, - n: i64, - ptr: *const i8, - ) -> int16x8x3_t; - } - _vld3q_lane_s16(b.0, b.1, b.2, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld3) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld3_u16(a: *const u16) -> uint16x4x3_t { + let mut ret_val: uint16x4x3_t = transmute(vld3_s16(transmute(a))); + ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]) }; + ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]) }; + 
ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]) }; + ret_val } -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> int32x2x3_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v2i32.p0" - )] - fn _vld3_lane_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, - n: i64, - ptr: *const i8, - ) -> int32x2x3_t; - } - _vld3_lane_s32(b.0, b.1, b.2, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld3) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld3q_u16(a: *const u16) -> uint16x8x3_t { + transmute(vld3q_s16(transmute(a))) } -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s32)"] +#[doc = "Load multiple 3-element structures to three registers"] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> int32x4x3_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v4i32.p0" - )] - fn _vld3q_lane_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, - n: i64, - ptr: *const i8, - ) -> int32x4x3_t; - } - _vld3q_lane_s32(b.0, b.1, b.2, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld3) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld3q_u16(a: *const u16) -> uint16x8x3_t { + let mut ret_val: uint16x8x3_t = transmute(vld3q_s16(transmute(a))); + ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val } -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s8)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8x8x3_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i8.p0")] - fn _vld3_lane_s8( - ptr: *const i8, - a: int8x8_t, - b: int8x8_t, - c: int8x8_t, - n: i32, - size: i32, - ) -> int8x8x3_t; - } - _vld3_lane_s8(a as _, b.0, b.1, b.2, LANE, 1) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld3) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld3_u32(a: *const u32) -> uint32x2x3_t { + transmute(vld3_s32(transmute(a))) } -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s16)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> int16x4x3_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i16.p0")] - fn _vld3_lane_s16( - ptr: *const i8, - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, - n: i32, - size: i32, - ) -> int16x4x3_t; - } - _vld3_lane_s16(a as _, b.0, b.1, b.2, LANE, 2) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld3) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld3_u32(a: *const u32) -> uint32x2x3_t { + let mut ret_val: uint32x2x3_t = transmute(vld3_s32(transmute(a))); + ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) }; + ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) }; + ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) }; + ret_val } -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s16)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn 
vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> int16x8x3_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i16.p0")] - fn _vld3q_lane_s16( - ptr: *const i8, - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, - n: i32, - size: i32, - ) -> int16x8x3_t; - } - _vld3q_lane_s16(a as _, b.0, b.1, b.2, LANE, 2) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld3) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld3q_u32(a: *const u32) -> uint32x4x3_t { + transmute(vld3q_s32(transmute(a))) } -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> int32x2x3_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2i32.p0")] - fn _vld3_lane_s32( - ptr: *const i8, - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, - n: i32, - size: i32, 
- ) -> int32x2x3_t; - } - _vld3_lane_s32(a as _, b.0, b.1, b.2, LANE, 4) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld3) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld3q_u32(a: *const u32) -> uint32x4x3_t { + let mut ret_val: uint32x4x3_t = transmute(vld3q_s32(transmute(a))); + ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]) }; + ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]) }; + ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]) }; + ret_val } -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> int32x4x3_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i32.p0")] - fn _vld3q_lane_s32( - ptr: *const i8, - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, - n: i32, - size: i32, - ) -> int32x4x3_t; - } - _vld3q_lane_s32(a as _, b.0, 
b.1, b.2, LANE, 4) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld3) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld3_p8(a: *const p8) -> poly8x8x3_t { + transmute(vld3_s8(transmute(a))) } #[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) + assert_instr(ld3) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -18962,23 +24689,26 @@ pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_lane_u8(a: *const u8, b: uint8x8x3_t) -> uint8x8x3_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld3_lane_s8::(transmute(a), transmute(b))) +pub unsafe fn vld3_p8(a: *const p8) -> poly8x8x3_t { + let mut ret_val: poly8x8x3_t = transmute(vld3_s8(transmute(a))); + ret_val.0 = 
unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val } #[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) + assert_instr(ld3) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -18987,23 +24717,22 @@ pub unsafe fn vld3_lane_u8(a: *const u8, b: uint8x8x3_t) -> uin target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_lane_u16(a: *const u16, b: uint16x4x3_t) -> uint16x4x3_t { - static_assert_uimm_bits!(LANE, 2); - transmute(vld3_lane_s16::(transmute(a), transmute(b))) +pub unsafe fn vld3q_p8(a: *const p8) -> poly8x16x3_t { + transmute(vld3q_s8(transmute(a))) } #[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) + assert_instr(ld3) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -19012,23 +24741,44 @@ pub unsafe fn vld3_lane_u16(a: *const u16, b: uint16x4x3_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3q_lane_u16(a: *const u16, b: uint16x8x3_t) -> uint16x8x3_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld3q_lane_s16::(transmute(a), transmute(b))) +pub unsafe fn vld3q_p8(a: *const p8) -> poly8x16x3_t { + let mut ret_val: poly8x16x3_t = transmute(vld3q_s8(transmute(a))); + ret_val.0 = unsafe { + simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + }; + ret_val.1 = unsafe { + simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + }; + ret_val.2 = unsafe { + simd_shuffle!( + ret_val.2, + ret_val.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + }; + ret_val } #[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) + assert_instr(ld3) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -19037,23 +24787,22 @@ pub unsafe fn vld3q_lane_u16(a: *const u16, b: uint16x8x3_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_lane_u32(a: *const u32, b: uint32x2x3_t) -> uint32x2x3_t { - static_assert_uimm_bits!(LANE, 1); - transmute(vld3_lane_s32::(transmute(a), transmute(b))) +pub unsafe fn vld3_p16(a: *const p16) -> poly16x4x3_t { + transmute(vld3_s16(transmute(a))) } #[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) + assert_instr(ld3) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -19062,23 +24811,26 @@ pub unsafe fn vld3_lane_u32(a: *const u32, b: uint32x2x3_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3q_lane_u32(a: *const u32, b: uint32x4x3_t) -> uint32x4x3_t { - static_assert_uimm_bits!(LANE, 2); - transmute(vld3q_lane_s32::(transmute(a), transmute(b))) +pub unsafe fn 
vld3_p16(a: *const p16) -> poly16x4x3_t { + let mut ret_val: poly16x4x3_t = transmute(vld3_s16(transmute(a))); + ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]) }; + ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]) }; + ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]) }; + ret_val } #[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) + assert_instr(ld3) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -19087,23 +24839,22 @@ pub unsafe fn vld3q_lane_u32(a: *const u32, b: uint32x4x3_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_lane_p8(a: *const p8, b: poly8x8x3_t) -> poly8x8x3_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld3_lane_s8::(transmute(a), transmute(b))) +pub unsafe fn vld3q_p16(a: *const p16) -> poly16x8x3_t { + transmute(vld3q_s16(transmute(a))) } #[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p16)"] #[doc = "## 
Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) + assert_instr(ld3) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -19112,37 +24863,417 @@ pub unsafe fn vld3_lane_p8(a: *const p8, b: poly8x8x3_t) -> pol target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_lane_p16(a: *const p16, b: poly16x4x3_t) -> poly16x4x3_t { - static_assert_uimm_bits!(LANE, 2); - transmute(vld3_lane_s16::(transmute(a), transmute(b))) +pub unsafe fn vld3q_p16(a: *const p16) -> poly16x8x3_t { + let mut ret_val: poly16x8x3_t = transmute(vld3q_s16(transmute(a))); + ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val } #[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld3q_lane_f32(a: *const f32, b: 
float32x4x3_t) -> float32x4x3_t { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4f32.p0")] + fn _vld3q_lane_f32( + ptr: *const i8, + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + n: i32, + size: i32, + ) -> float32x4x3_t; + } + _vld3q_lane_f32(a as _, b.0, b.1, b.2, LANE, 4) +} +#[doc = "Load single 4-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] +#[cfg(target_arch = "arm")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub unsafe fn vld4_dup_f16(a: *const f16) -> float16x4x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4f16.p0f16")] + fn _vld4_dup_f16(ptr: *const f16, size: i32) -> float16x4x4_t; + } + _vld4_dup_f16(a as _, 2) +} +#[doc = "Load single 4-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg(target_arch = "arm")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub unsafe fn vld4q_dup_f16(a: *const f16) -> float16x8x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = 
"llvm.arm.neon.vld4dup.v8f16.p0f16")] + fn _vld4q_dup_f16(ptr: *const f16, size: i32) -> float16x8x4_t; + } + _vld4q_dup_f16(a as _, 2) +} +#[doc = "Load single 4-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(not(target_arch = "arm"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + assert_instr(ld4r) )] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub unsafe fn vld4_dup_f16(a: *const f16) -> float16x4x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v4f16.p0f16" + )] + fn _vld4_dup_f16(ptr: *const f16) -> float16x4x4_t; + } + _vld4_dup_f16(a as _) +} +#[doc = "Load single 4-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(not(target_arch = "arm"))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4r) )] -pub unsafe fn vld3q_lane_p16(a: *const p16, b: poly16x8x3_t) -> poly16x8x3_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld3q_lane_s16::(transmute(a), transmute(b))) +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub unsafe fn vld4q_dup_f16(a: *const f16) -> float16x8x4_t { + unsafe extern "unadjusted" 
{ + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v8f16.p0f16" + )] + fn _vld4q_dup_f16(ptr: *const f16) -> float16x8x4_t; + } + _vld4q_dup_f16(a as _) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v2f32.p0")] + fn _vld4_dup_f32(ptr: *const i8, size: i32) -> float32x2x4_t; + } + _vld4_dup_f32(a as *const i8, 4) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4f32.p0")] + fn _vld4q_dup_f32(ptr: *const i8, size: i32) -> float32x4x4_t; + } + _vld4q_dup_f32(a as *const i8, 4) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] 
+#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8i8.p0")] + fn _vld4_dup_s8(ptr: *const i8, size: i32) -> int8x8x4_t; + } + _vld4_dup_s8(a as *const i8, 1) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v16i8.p0")] + fn _vld4q_dup_s8(ptr: *const i8, size: i32) -> int8x16x4_t; + } + _vld4q_dup_s8(a as *const i8, 1) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4i16.p0")] + fn _vld4_dup_s16(ptr: *const i8, size: i32) -> int16x4x4_t; + } + _vld4_dup_s16(a as *const i8, 2) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8i16.p0")] + fn _vld4q_dup_s16(ptr: *const i8, size: i32) -> int16x8x4_t; + } + _vld4q_dup_s16(a as *const i8, 2) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v2i32.p0")] + fn _vld4_dup_s32(ptr: *const i8, size: i32) -> int32x2x4_t; + } + _vld4_dup_s32(a as *const i8, 4) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4i32.p0")] + fn 
_vld4q_dup_s32(ptr: *const i8, size: i32) -> int32x4x4_t; + } + _vld4q_dup_s32(a as *const i8, 4) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v2f32.p0f32.p0" + )] + fn _vld4_dup_f32(ptr: *const f32) -> float32x2x4_t; + } + _vld4_dup_f32(a as _) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v4f32.p0f32.p0" + )] + fn _vld4q_dup_f32(ptr: *const f32) -> float32x4x4_t; + } + _vld4q_dup_f32(a as _) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] 
+#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v8i8.p0i8.p0" + )] + fn _vld4_dup_s8(ptr: *const i8) -> int8x8x4_t; + } + _vld4_dup_s8(a as _) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v16i8.p0i8.p0" + )] + fn _vld4q_dup_s8(ptr: *const i8) -> int8x16x4_t; + } + _vld4q_dup_s8(a as _) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v4i16.p0i16.p0" + )] + fn _vld4_dup_s16(ptr: *const i16) -> int16x4x4_t; + } + _vld4_dup_s16(a as _) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v8i16.p0i16.p0" + )] + fn _vld4q_dup_s16(ptr: *const i16) -> int16x8x4_t; + } + _vld4q_dup_s16(a as _) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v2i32.p0i32.p0" + )] + fn _vld4_dup_s32(ptr: *const i32) -> int32x2x4_t; + } + _vld4_dup_s32(a as _) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), 
+ link_name = "llvm.aarch64.neon.ld4r.v4i32.p0i32.p0" + )] + fn _vld4q_dup_s32(ptr: *const i32) -> int32x4x4_t; + } + _vld4q_dup_s32(a as _) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_dup_s64(a: *const i64) -> int64x1x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v1i64.p0i64.p0" + )] + fn _vld4_dup_s64(ptr: *const i64) -> int64x1x4_t; + } + _vld4_dup_s64(a as _) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p64)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] @@ -19151,7 +25282,7 @@ pub unsafe fn vld3q_lane_p16(a: *const p16, b: poly16x8x3_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -19161,46 +25292,27 @@ pub unsafe fn vld3q_lane_p16(a: *const p16, b: poly16x8x3_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_p64(a: *const p64) -> poly64x1x3_t { - transmute(vld3_s64(transmute(a))) -} -#[doc = "Load multiple 3-element structures to three registers"] 
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vld3_s64(a: *const i64) -> int64x1x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v1i64.p0" - )] - fn _vld3_s64(ptr: *const int64x1_t) -> int64x1x3_t; - } - _vld3_s64(a as _) +pub unsafe fn vld4_dup_p64(a: *const p64) -> poly64x1x4_t { + transmute(vld4_dup_s64(transmute(a))) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s64)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vld3_s64(a: *const i64) -> int64x1x3_t { +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_dup_s64(a: *const i64) -> int64x1x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v1i64.p0")] - fn _vld3_s64(ptr: *const i8, size: i32) -> int64x1x3_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v1i64.p0")] + fn _vld4_dup_s64(ptr: *const i8, size: i32) -> int64x1x4_t; } - _vld3_s64(a as *const i8, 8) + _vld4_dup_s64(a as *const i8, 8) } -#[doc = "Load multiple 3-element structures to three registers"] 
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u64)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] @@ -19209,7 +25321,7 @@ pub unsafe fn vld3_s64(a: *const i64) -> int64x1x3_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -19219,21 +25331,21 @@ pub unsafe fn vld3_s64(a: *const i64) -> int64x1x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_u64(a: *const u64) -> uint64x1x3_t { - transmute(vld3_s64(transmute(a))) +pub unsafe fn vld4_dup_u64(a: *const u64) -> uint64x1x4_t { + transmute(vld4_dup_s64(transmute(a))) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u8)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -19243,21 +25355,21 @@ pub unsafe fn vld3_u64(a: *const u64) -> 
uint64x1x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_u8(a: *const u8) -> uint8x8x3_t { - transmute(vld3_s8(transmute(a))) +pub unsafe fn vld4_dup_u8(a: *const u8) -> uint8x8x4_t { + transmute(vld4_dup_s8(transmute(a))) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u8)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -19267,25 +25379,26 @@ pub unsafe fn vld3_u8(a: *const u8) -> uint8x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_u8(a: *const u8) -> uint8x8x3_t { - let mut ret_val: uint8x8x3_t = transmute(vld3_s8(transmute(a))); +pub unsafe fn vld4_dup_u8(a: *const u8) -> uint8x8x4_t { + let mut ret_val: uint8x8x4_t = transmute(vld4_dup_s8(transmute(a))); ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]) }; ret_val } -#[doc = "Load multiple 3-element structures to three 
registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u8)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -19295,21 +25408,21 @@ pub unsafe fn vld3_u8(a: *const u8) -> uint8x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3q_u8(a: *const u8) -> uint8x16x3_t { - transmute(vld3q_s8(transmute(a))) +pub unsafe fn vld4q_dup_u8(a: *const u8) -> uint8x16x4_t { + transmute(vld4q_dup_s8(transmute(a))) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u8)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) + 
assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -19319,8 +25432,8 @@ pub unsafe fn vld3q_u8(a: *const u8) -> uint8x16x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3q_u8(a: *const u8) -> uint8x16x3_t { - let mut ret_val: uint8x16x3_t = transmute(vld3q_s8(transmute(a))); +pub unsafe fn vld4q_dup_u8(a: *const u8) -> uint8x16x4_t { + let mut ret_val: uint8x16x4_t = transmute(vld4q_dup_s8(transmute(a))); ret_val.0 = unsafe { simd_shuffle!( ret_val.0, @@ -19342,20 +25455,27 @@ pub unsafe fn vld3q_u8(a: *const u8) -> uint8x16x3_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) }; + ret_val.3 = unsafe { + simd_shuffle!( + ret_val.3, + ret_val.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + }; ret_val } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u16)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -19365,21 +25485,21 @@ pub unsafe fn vld3q_u8(a: *const u8) -> uint8x16x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_u16(a: *const u16) -> uint16x4x3_t { - transmute(vld3_s16(transmute(a))) +pub unsafe fn vld4_dup_u16(a: *const 
u16) -> uint16x4x4_t { + transmute(vld4_dup_s16(transmute(a))) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u16)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -19389,25 +25509,26 @@ pub unsafe fn vld3_u16(a: *const u16) -> uint16x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_u16(a: *const u16) -> uint16x4x3_t { - let mut ret_val: uint16x4x3_t = transmute(vld3_s16(transmute(a))); +pub unsafe fn vld4_dup_u16(a: *const u16) -> uint16x4x4_t { + let mut ret_val: uint16x4x4_t = transmute(vld4_dup_s16(transmute(a))); ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]) }; ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]) }; ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]) }; + ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]) }; ret_val } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u16)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -19417,21 +25538,21 @@ pub unsafe fn vld3_u16(a: *const u16) -> uint16x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3q_u16(a: *const u16) -> uint16x8x3_t { - transmute(vld3q_s16(transmute(a))) +pub unsafe fn vld4q_dup_u16(a: *const u16) -> uint16x8x4_t { + transmute(vld4q_dup_s16(transmute(a))) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u16)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -19441,25 +25562,26 @@ pub unsafe fn vld3q_u16(a: *const u16) -> uint16x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = 
"111800") )] -pub unsafe fn vld3q_u16(a: *const u16) -> uint16x8x3_t { - let mut ret_val: uint16x8x3_t = transmute(vld3q_s16(transmute(a))); +pub unsafe fn vld4q_dup_u16(a: *const u16) -> uint16x8x4_t { + let mut ret_val: uint16x8x4_t = transmute(vld4q_dup_s16(transmute(a))); ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]) }; ret_val } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u32)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -19469,21 +25591,21 @@ pub unsafe fn vld3q_u16(a: *const u16) -> uint16x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_u32(a: *const u32) -> uint32x2x3_t { - transmute(vld3_s32(transmute(a))) +pub unsafe fn vld4_dup_u32(a: *const u32) -> uint32x2x4_t { + transmute(vld4_dup_s32(transmute(a))) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u32)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -19493,25 +25615,26 @@ pub unsafe fn vld3_u32(a: *const u32) -> uint32x2x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_u32(a: *const u32) -> uint32x2x3_t { - let mut ret_val: uint32x2x3_t = transmute(vld3_s32(transmute(a))); +pub unsafe fn vld4_dup_u32(a: *const u32) -> uint32x2x4_t { + let mut ret_val: uint32x2x4_t = transmute(vld4_dup_s32(transmute(a))); ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) }; ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) }; ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) }; + ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) }; ret_val } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u32)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -19521,21 +25644,21 @@ pub unsafe fn vld3_u32(a: *const u32) -> uint32x2x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3q_u32(a: *const u32) -> uint32x4x3_t { - transmute(vld3q_s32(transmute(a))) +pub unsafe fn vld4q_dup_u32(a: *const u32) -> uint32x4x4_t { + transmute(vld4q_dup_s32(transmute(a))) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u32)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -19545,25 +25668,26 @@ pub unsafe fn vld3q_u32(a: *const u32) -> uint32x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3q_u32(a: *const u32) -> uint32x4x3_t { - let mut ret_val: uint32x4x3_t = transmute(vld3q_s32(transmute(a))); +pub unsafe fn vld4q_dup_u32(a: *const u32) -> uint32x4x4_t { 
+ let mut ret_val: uint32x4x4_t = transmute(vld4q_dup_s32(transmute(a))); ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]) }; ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]) }; ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]) }; + ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]) }; ret_val } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p8)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -19573,21 +25697,21 @@ pub unsafe fn vld3q_u32(a: *const u32) -> uint32x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_p8(a: *const p8) -> poly8x8x3_t { - transmute(vld3_s8(transmute(a))) +pub unsafe fn vld4_dup_p8(a: *const p8) -> poly8x8x4_t { + transmute(vld4_dup_s8(transmute(a))) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p8)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p8)"] 
#[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -19597,25 +25721,26 @@ pub unsafe fn vld3_p8(a: *const p8) -> poly8x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_p8(a: *const p8) -> poly8x8x3_t { - let mut ret_val: poly8x8x3_t = transmute(vld3_s8(transmute(a))); +pub unsafe fn vld4_dup_p8(a: *const p8) -> poly8x8x4_t { + let mut ret_val: poly8x8x4_t = transmute(vld4_dup_s8(transmute(a))); ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]) }; ret_val } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p8)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, 
any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -19625,21 +25750,21 @@ pub unsafe fn vld3_p8(a: *const p8) -> poly8x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3q_p8(a: *const p8) -> poly8x16x3_t { - transmute(vld3q_s8(transmute(a))) +pub unsafe fn vld4q_dup_p8(a: *const p8) -> poly8x16x4_t { + transmute(vld4q_dup_s8(transmute(a))) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p8)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -19649,8 +25774,8 @@ pub unsafe fn vld3q_p8(a: *const p8) -> poly8x16x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3q_p8(a: *const p8) -> poly8x16x3_t { - let mut ret_val: poly8x16x3_t = transmute(vld3q_s8(transmute(a))); +pub unsafe fn vld4q_dup_p8(a: *const p8) -> poly8x16x4_t { + let mut ret_val: poly8x16x4_t = transmute(vld4q_dup_s8(transmute(a))); ret_val.0 = unsafe { simd_shuffle!( ret_val.0, @@ -19672,20 +25797,27 @@ pub unsafe fn vld3q_p8(a: *const p8) -> poly8x16x3_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) }; + 
ret_val.3 = unsafe { + simd_shuffle!( + ret_val.3, + ret_val.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + }; ret_val } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p16)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -19695,21 +25827,21 @@ pub unsafe fn vld3q_p8(a: *const p8) -> poly8x16x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_p16(a: *const p16) -> poly16x4x3_t { - transmute(vld3_s16(transmute(a))) +pub unsafe fn vld4_dup_p16(a: *const p16) -> poly16x4x4_t { + transmute(vld4_dup_s16(transmute(a))) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p16)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = 
"arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -19719,25 +25851,26 @@ pub unsafe fn vld3_p16(a: *const p16) -> poly16x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_p16(a: *const p16) -> poly16x4x3_t { - let mut ret_val: poly16x4x3_t = transmute(vld3_s16(transmute(a))); +pub unsafe fn vld4_dup_p16(a: *const p16) -> poly16x4x4_t { + let mut ret_val: poly16x4x4_t = transmute(vld4_dup_s16(transmute(a))); ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]) }; ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]) }; ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]) }; + ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]) }; ret_val } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p16)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -19747,21 +25880,21 @@ pub unsafe fn vld3_p16(a: *const p16) -> poly16x4x3_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3q_p16(a: *const p16) -> poly16x8x3_t { - transmute(vld3q_s16(transmute(a))) +pub unsafe fn vld4q_dup_p16(a: *const p16) -> poly16x8x4_t { + transmute(vld4q_dup_s16(transmute(a))) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p16)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -19771,40 +25904,33 @@ pub unsafe fn vld3q_p16(a: *const p16) -> poly16x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3q_p16(a: *const p16) -> poly16x8x3_t { - let mut ret_val: poly16x8x3_t = transmute(vld3q_s16(transmute(a))); +pub unsafe fn vld4q_dup_p16(a: *const p16) -> poly16x8x4_t { + let mut ret_val: poly16x8x4_t = transmute(vld4q_dup_s16(transmute(a))); ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]) }; ret_val } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f32)"] +#[doc = "Load single 4-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) -> float32x4x3_t { - static_assert_uimm_bits!(LANE, 2); +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub unsafe fn vld4_f16(a: *const f16) -> float16x4x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4f32.p0")] - fn _vld3q_lane_f32( - ptr: *const i8, - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - n: i32, - size: i32, - ) -> float32x4x3_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4f16.p0f16")] + fn _vld4_f16(ptr: *const f16, size: i32) -> float16x4x4_t; } - _vld3q_lane_f32(a as _, b.0, b.1, b.2, LANE, 4) + _vld4_f16(a as _, 2) } #[doc = "Load single 4-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] @@ -19813,790 +25939,844 @@ pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) - #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vld4))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub unsafe fn vld4_dup_f16(a: *const f16) -> float16x4x4_t { +pub unsafe fn vld4q_f16(a: *const f16) -> float16x8x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4f16.p0f16")] - fn _vld4_dup_f16(ptr: *const f16, size: i32) -> float16x4x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8f16.p0f16")] + fn _vld4q_f16(ptr: *const f16, size: i32) -> float16x8x4_t; } - _vld4_dup_f16(a as _, 2) + _vld4q_f16(a as _, 2) } #[doc = "Load single 4-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg(target_arch = "arm")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4) +)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub unsafe fn vld4q_dup_f16(a: *const f16) -> float16x8x4_t { +pub unsafe fn vld4_f16(a: *const f16) -> float16x4x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8f16.p0f16")] - fn _vld4q_dup_f16(ptr: *const f16, size: i32) -> float16x8x4_t; + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v4f16.p0f16" + )] + fn _vld4_f16(ptr: *const f16) -> float16x4x4_t; } - _vld4q_dup_f16(a as _, 2) + _vld4_f16(a as _) } #[doc = "Load single 4-element structure and replicate to all lanes of 
two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(not(target_arch = "arm"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) + assert_instr(ld4) )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub unsafe fn vld4_dup_f16(a: *const f16) -> float16x4x4_t { +pub unsafe fn vld4q_f16(a: *const f16) -> float16x8x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v8f16.p0f16" + )] + fn _vld4q_f16(ptr: *const f16) -> float16x8x4_t; + } + _vld4q_f16(a as _) +} +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v2f32.p0" + )] + fn _vld4_f32(ptr: *const float32x2_t) -> float32x2x4_t; + } + _vld4_f32(a as _) +} +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since 
= "1.59.0")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v4f32.p0" + )] + fn _vld4q_f32(ptr: *const float32x4_t) -> float32x4x4_t; + } + _vld4q_f32(a as _) +} +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v8i8.p0" + )] + fn _vld4_s8(ptr: *const int8x8_t) -> int8x8x4_t; + } + _vld4_s8(a as _) +} +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v16i8.p0" + )] + fn _vld4q_s8(ptr: *const int8x16_t) -> int8x16x4_t; + } + _vld4q_s8(a as _) +} +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] 
+#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v4i16.p0" + )] + fn _vld4_s16(ptr: *const int16x4_t) -> int16x4x4_t; + } + _vld4_s16(a as _) +} +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v4f16.p0f16" + link_name = "llvm.aarch64.neon.ld4.v8i16.p0" )] - fn _vld4_dup_f16(ptr: *const f16) -> float16x4x4_t; + fn _vld4q_s16(ptr: *const int16x8_t) -> int16x8x4_t; } - _vld4_dup_f16(a as _) + _vld4q_s16(a as _) } -#[doc = "Load single 4-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f16)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = 
"136306")] -pub unsafe fn vld4q_dup_f16(a: *const f16) -> float16x8x4_t { +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v8f16.p0f16" + link_name = "llvm.aarch64.neon.ld4.v2i32.p0" )] - fn _vld4q_dup_f16(ptr: *const f16) -> float16x8x4_t; + fn _vld4_s32(ptr: *const int32x2_t) -> int32x2x4_t; } - _vld4q_dup_f16(a as _) + _vld4_s32(a as _) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f32)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld4))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v2f32.p0")] - fn _vld4_dup_f32(ptr: *const i8, size: i32) -> float32x2x4_t; + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v4i32.p0" + )] + fn _vld4q_s32(ptr: *const int32x4_t) -> int32x4x4_t; } - _vld4_dup_f32(a as *const i8, 4) + _vld4q_s32(a as _) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] 
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f32)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld4))] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { +#[cfg_attr(test, assert_instr(vld4))] +pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4f32.p0")] - fn _vld4q_dup_f32(ptr: *const i8, size: i32) -> float32x4x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v2f32.p0")] + fn _vld4_f32(ptr: *const i8, size: i32) -> float32x2x4_t; } - _vld4q_dup_f32(a as *const i8, 4) + _vld4_f32(a as *const i8, 4) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s8)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld4))] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { +#[cfg_attr(test, assert_instr(vld4))] +pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = 
"llvm.arm.neon.vld4dup.v8i8.p0")] - fn _vld4_dup_s8(ptr: *const i8, size: i32) -> int8x8x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4f32.p0")] + fn _vld4q_f32(ptr: *const i8, size: i32) -> float32x4x4_t; } - _vld4_dup_s8(a as *const i8, 1) + _vld4q_f32(a as *const i8, 4) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s8)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld4))] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { +#[cfg_attr(test, assert_instr(vld4))] +pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v16i8.p0")] - fn _vld4q_dup_s8(ptr: *const i8, size: i32) -> int8x16x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8i8.p0")] + fn _vld4_s8(ptr: *const i8, size: i32) -> int8x8x4_t; } - _vld4q_dup_s8(a as *const i8, 1) + _vld4_s8(a as *const i8, 1) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s16)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] 
-#[cfg_attr(test, assert_instr(vld4))] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { +#[cfg_attr(test, assert_instr(vld4))] +pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4i16.p0")] - fn _vld4_dup_s16(ptr: *const i8, size: i32) -> int16x4x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v16i8.p0")] + fn _vld4q_s8(ptr: *const i8, size: i32) -> int8x16x4_t; } - _vld4_dup_s16(a as *const i8, 2) + _vld4q_s8(a as *const i8, 1) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s16)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld4))] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { +#[cfg_attr(test, assert_instr(vld4))] +pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8i16.p0")] - fn _vld4q_dup_s16(ptr: *const i8, size: i32) -> int16x8x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4i16.p0")] + fn _vld4_s16(ptr: *const i8, size: i32) -> int16x4x4_t; } - _vld4q_dup_s16(a as *const i8, 2) + _vld4_s16(a as *const i8, 2) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s32)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld4))] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { +#[cfg_attr(test, assert_instr(vld4))] +pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v2i32.p0")] - fn _vld4_dup_s32(ptr: *const i8, size: i32) -> int32x2x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8i16.p0")] + fn _vld4q_s16(ptr: *const i8, size: i32) -> int16x8x4_t; } - _vld4_dup_s32(a as *const i8, 4) + _vld4q_s16(a as *const i8, 2) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s32)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld4))] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { +#[cfg_attr(test, assert_instr(vld4))] +pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = 
"llvm.arm.neon.vld4dup.v4i32.p0")] - fn _vld4q_dup_s32(ptr: *const i8, size: i32) -> int32x4x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v2i32.p0")] + fn _vld4_s32(ptr: *const i8, size: i32) -> int32x2x4_t; } - _vld4q_dup_s32(a as *const i8, 4) + _vld4_s32(a as *const i8, 4) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f32)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld4))] +pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v2f32.p0f32.p0" - )] - fn _vld4_dup_f32(ptr: *const f32) -> float32x2x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4i32.p0")] + fn _vld4q_s32(ptr: *const i8, size: i32) -> int32x4x4_t; } - _vld4_dup_f32(a as _) + _vld4q_s32(a as *const i8, 4) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f32)"] +#[doc = "Load multiple 4-element structures to two registers"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub unsafe fn vld4_lane_f16(a: *const f16, b: float16x4x4_t) -> float16x4x4_t { + static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v4f32.p0f32.p0" - )] - fn _vld4q_dup_f32(ptr: *const f32) -> float32x4x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4f16.p0")] + fn _vld4_lane_f16( + ptr: *const f16, + a: float16x4_t, + b: float16x4_t, + c: float16x4_t, + d: float16x4_t, + n: i32, + size: i32, + ) -> float16x4x4_t; } - _vld4q_dup_f32(a as _) + _vld4_lane_f16(a as _, b.0, b.1, b.2, b.3, LANE, 2) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s8)"] +#[doc = "Load multiple 4-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_dup_s8(a: *const i8) -> 
int8x8x4_t { +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub unsafe fn vld4q_lane_f16(a: *const f16, b: float16x8x4_t) -> float16x8x4_t { + static_assert_uimm_bits!(LANE, 3); unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v8i8.p0i8.p0" - )] - fn _vld4_dup_s8(ptr: *const i8) -> int8x8x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8f16.p0")] + fn _vld4q_lane_f16( + ptr: *const f16, + a: float16x8_t, + b: float16x8_t, + c: float16x8_t, + d: float16x8_t, + n: i32, + size: i32, + ) -> float16x8x4_t; } - _vld4_dup_s8(a as _) + _vld4q_lane_f16(a as _, b.0, b.1, b.2, b.3, LANE, 2) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s8)"] +#[doc = "Load multiple 4-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub unsafe fn vld4_lane_f16(a: *const f16, b: float16x4x4_t) -> float16x4x4_t { + static_assert_uimm_bits!(LANE, 2); unsafe 
extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v16i8.p0i8.p0" + link_name = "llvm.aarch64.neon.ld4lane.v4f16.p0" )] - fn _vld4q_dup_s8(ptr: *const i8) -> int8x16x4_t; + fn _vld4_lane_f16( + a: float16x4_t, + b: float16x4_t, + c: float16x4_t, + d: float16x4_t, + n: i64, + ptr: *const f16, + ) -> float16x4x4_t; } - _vld4q_dup_s8(a as _) + _vld4_lane_f16(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s16)"] +#[doc = "Load multiple 4-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub unsafe fn vld4q_lane_f16(a: *const f16, b: float16x8x4_t) -> float16x8x4_t { + static_assert_uimm_bits!(LANE, 3); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v4i16.p0i16.p0" + link_name = "llvm.aarch64.neon.ld4lane.v8f16.p0" )] - fn _vld4_dup_s16(ptr: *const i16) -> int16x4x4_t; + fn _vld4q_lane_f16( + a: float16x8_t, + b: float16x8_t, + c: float16x8_t, + d: float16x8_t, + n: i64, + ptr: *const f16, + ) -> float16x8x4_t; } - _vld4_dup_s16(a as _) + _vld4q_lane_f16(b.0, b.1, b.2, b.3, LANE 
as i64, a as _) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s16)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { +pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> float32x2x4_t { + static_assert_uimm_bits!(LANE, 1); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v8i16.p0i16.p0" + link_name = "llvm.aarch64.neon.ld4lane.v2f32.p0" )] - fn _vld4q_dup_s16(ptr: *const i16) -> int16x8x4_t; + fn _vld4_lane_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + d: float32x2_t, + n: i64, + ptr: *const i8, + ) -> float32x2x4_t; } - _vld4q_dup_s16(a as _) + _vld4_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s32)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] 
+#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { +pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) -> float32x4x4_t { + static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v2i32.p0i32.p0" + link_name = "llvm.aarch64.neon.ld4lane.v4f32.p0" )] - fn _vld4_dup_s32(ptr: *const i32) -> int32x2x4_t; + fn _vld4q_lane_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + d: float32x4_t, + n: i64, + ptr: *const i8, + ) -> float32x4x4_t; } - _vld4_dup_s32(a as _) + _vld4q_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s32)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { +pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8x8x4_t { + static_assert_uimm_bits!(LANE, 3); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v4i32.p0i32.p0" + link_name = "llvm.aarch64.neon.ld4lane.v8i8.p0" )] - fn _vld4q_dup_s32(ptr: *const i32) -> int32x4x4_t; + fn _vld4_lane_s8( + a: int8x8_t, + b: int8x8_t, + c: int8x8_t, + d: int8x8_t, + n: i64, + ptr: *const i8, + ) 
-> int8x8x4_t; } - _vld4q_dup_s32(a as _) + _vld4_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s64)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_dup_s64(a: *const i64) -> int64x1x4_t { +pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> int16x4x4_t { + static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v1i64.p0i64.p0" + link_name = "llvm.aarch64.neon.ld4lane.v4i16.p0" )] - fn _vld4_dup_s64(ptr: *const i64) -> int64x1x4_t; - } - _vld4_dup_s64(a as _) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_dup_p64(a: *const p64) -> poly64x1x4_t { - transmute(vld4_dup_s64(transmute(a))) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(nop))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_dup_s64(a: *const i64) -> int64x1x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v1i64.p0")] - fn _vld4_dup_s64(ptr: *const i8, size: i32) -> int64x1x4_t; - } - _vld4_dup_s64(a as *const i8, 8) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_dup_u64(a: *const u64) -> uint64x1x4_t { - transmute(vld4_dup_s64(transmute(a))) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon 
instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_dup_u8(a: *const u8) -> uint8x8x4_t { - transmute(vld4_dup_s8(transmute(a))) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_dup_u8(a: *const u8) -> uint8x8x4_t { - let mut ret_val: uint8x8x4_t = transmute(vld4_dup_s8(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val + fn _vld4_lane_s16( + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, + 
d: int16x4_t, + n: i64, + ptr: *const i8, + ) -> int16x4x4_t; + } + _vld4_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u8)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_dup_u8(a: *const u8) -> uint8x16x4_t { - transmute(vld4q_dup_s8(transmute(a))) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> int16x8x4_t { + static_assert_uimm_bits!(LANE, 3); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4lane.v8i16.p0" + )] + fn _vld4q_lane_s16( + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, + d: int16x8_t, + n: i64, + ptr: *const i8, + ) -> int16x8x4_t; + } + _vld4q_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u8)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_dup_u8(a: *const u8) -> uint8x16x4_t { - let mut ret_val: uint8x16x4_t = transmute(vld4q_dup_s8(transmute(a))); - ret_val.0 = unsafe { - simd_shuffle!( - ret_val.0, - ret_val.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val.1 = unsafe { - simd_shuffle!( - ret_val.1, - ret_val.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val.2 = unsafe { - simd_shuffle!( - ret_val.2, - ret_val.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val.3 = unsafe { - simd_shuffle!( - ret_val.3, - ret_val.3, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> int32x2x4_t { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4lane.v2i32.p0" + )] + fn 
_vld4_lane_s32( + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, + d: int32x2_t, + n: i64, + ptr: *const i8, + ) -> int32x2x4_t; + } + _vld4_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u16)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_dup_u16(a: *const u16) -> uint16x4x4_t { - transmute(vld4_dup_s16(transmute(a))) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> int32x4x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4lane.v4i32.p0" + )] + fn _vld4q_lane_s32( + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, + d: int32x4_t, + n: i64, + ptr: *const i8, + ) -> int32x4x4_t; + } + _vld4q_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Load single 4-element structure and replicate to all lanes of four 
registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u16)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_dup_u16(a: *const u16) -> uint16x4x4_t { - let mut ret_val: uint16x4x4_t = transmute(vld4_dup_s16(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]) }; - ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]) }; - ret_val +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> float32x2x4_t { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2f32.p0")] + fn _vld4_lane_f32( + ptr: *const i8, + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + d: float32x2_t, + n: i32, + size: i32, + ) -> float32x2x4_t; + } + _vld4_lane_f32(a as _, b.0, b.1, b.2, b.3, 
LANE, 4) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u16)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_dup_u16(a: *const u16) -> uint16x8x4_t { - transmute(vld4q_dup_s16(transmute(a))) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) -> float32x4x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4f32.p0")] + fn _vld4q_lane_f32( + ptr: *const i8, + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + d: float32x4_t, + n: i32, + size: i32, + ) -> float32x4x4_t; + } + _vld4q_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u16)"] +#[doc = "Load multiple 
4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_dup_u16(a: *const u16) -> uint16x8x4_t { - let mut ret_val: uint16x8x4_t = transmute(vld4q_dup_s16(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8x8x4_t { + static_assert_uimm_bits!(LANE, 3); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i8.p0")] + fn _vld4_lane_s8( + ptr: *const i8, + a: int8x8_t, + b: int8x8_t, + c: int8x8_t, + d: int8x8_t, + n: i32, + size: i32, + ) -> int8x8x4_t; + } + _vld4_lane_s8(a as _, b.0, b.1, b.2, b.3, LANE, 1) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u32)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_dup_u32(a: *const u32) -> uint32x2x4_t { - transmute(vld4_dup_s32(transmute(a))) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> int16x4x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4i16.p0")] + fn _vld4_lane_s16( + ptr: *const i8, + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, + d: int16x4_t, + n: i32, + size: i32, + ) -> int16x4x4_t; + } + _vld4_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u32)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_dup_u32(a: *const u32) -> uint32x2x4_t { - let mut ret_val: uint32x2x4_t = transmute(vld4_dup_s32(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) }; - ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) }; - ret_val +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> int16x8x4_t { + static_assert_uimm_bits!(LANE, 3); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i16.p0")] + fn _vld4q_lane_s16( + ptr: *const i8, + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, + d: int16x8_t, + n: i32, + size: i32, + ) -> int16x8x4_t; + } + _vld4q_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u32)"] +#[doc = "Load multiple 
4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_dup_u32(a: *const u32) -> uint32x4x4_t { - transmute(vld4q_dup_s32(transmute(a))) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> int32x2x4_t { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2i32.p0")] + fn _vld4_lane_s32( + ptr: *const i8, + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, + d: int32x2_t, + n: i32, + size: i32, + ) -> int32x2x4_t; + } + _vld4_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u32)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] 
-#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_dup_u32(a: *const u32) -> uint32x4x4_t { - let mut ret_val: uint32x4x4_t = transmute(vld4q_dup_s32(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]) }; - ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]) }; - ret_val +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> int32x4x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4i32.p0")] + fn _vld4q_lane_s32( + ptr: *const i8, + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, + d: int32x4_t, + n: i32, + size: i32, + ) -> int32x4x4_t; + } + _vld4q_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p8)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u8)"] #[doc = "## Safety"] 
#[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) + assert_instr(ld4, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -20605,22 +26785,23 @@ pub unsafe fn vld4q_dup_u32(a: *const u32) -> uint32x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_dup_p8(a: *const p8) -> poly8x8x4_t { - transmute(vld4_dup_s8(transmute(a))) +pub unsafe fn vld4_lane_u8(a: *const u8, b: uint8x8x4_t) -> uint8x8x4_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld4_lane_s8::(transmute(a), transmute(b))) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p8)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) + assert_instr(ld4, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = 
"1.59.0") @@ -20628,28 +26809,24 @@ pub unsafe fn vld4_dup_p8(a: *const p8) -> poly8x8x4_t { #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_dup_p8(a: *const p8) -> poly8x8x4_t { - let mut ret_val: poly8x8x4_t = transmute(vld4_dup_s8(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val +)] +pub unsafe fn vld4_lane_u16(a: *const u16, b: uint16x4x4_t) -> uint16x4x4_t { + static_assert_uimm_bits!(LANE, 2); + transmute(vld4_lane_s16::(transmute(a), transmute(b))) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p8)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) + assert_instr(ld4, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -20658,22 +26835,23 @@ pub unsafe fn vld4_dup_p8(a: *const p8) -> poly8x8x4_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_dup_p8(a: *const p8) -> poly8x16x4_t { - transmute(vld4q_dup_s8(transmute(a))) +pub unsafe fn vld4q_lane_u16(a: *const u16, b: uint16x8x4_t) -> uint16x8x4_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld4q_lane_s16::(transmute(a), transmute(b))) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p8)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) + assert_instr(ld4, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -20682,51 +26860,23 @@ pub unsafe fn vld4q_dup_p8(a: *const p8) -> poly8x16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_dup_p8(a: *const p8) -> poly8x16x4_t { - let mut ret_val: poly8x16x4_t = transmute(vld4q_dup_s8(transmute(a))); - ret_val.0 = unsafe { - simd_shuffle!( - ret_val.0, - ret_val.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val.1 = unsafe { - simd_shuffle!( - ret_val.1, - ret_val.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val.2 = unsafe { - simd_shuffle!( - ret_val.2, - ret_val.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 
3, 2, 1, 0] - ) - }; - ret_val.3 = unsafe { - simd_shuffle!( - ret_val.3, - ret_val.3, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val +pub unsafe fn vld4_lane_u32(a: *const u32, b: uint32x2x4_t) -> uint32x2x4_t { + static_assert_uimm_bits!(LANE, 1); + transmute(vld4_lane_s32::(transmute(a), transmute(b))) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p16)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) + assert_instr(ld4, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -20735,22 +26885,23 @@ pub unsafe fn vld4q_dup_p8(a: *const p8) -> poly8x16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_dup_p16(a: *const p16) -> poly16x4x4_t { - transmute(vld4_dup_s16(transmute(a))) +pub unsafe fn vld4q_lane_u32(a: *const u32, b: uint32x4x4_t) -> uint32x4x4_t { + static_assert_uimm_bits!(LANE, 2); + transmute(vld4q_lane_s32::(transmute(a), transmute(b))) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p16)"] +#[doc = 
"Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) + assert_instr(ld4, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -20759,27 +26910,23 @@ pub unsafe fn vld4_dup_p16(a: *const p16) -> poly16x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_dup_p16(a: *const p16) -> poly16x4x4_t { - let mut ret_val: poly16x4x4_t = transmute(vld4_dup_s16(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]) }; - ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]) }; - ret_val +pub unsafe fn vld4_lane_p8(a: *const p8, b: poly8x8x4_t) -> poly8x8x4_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld4_lane_s8::(transmute(a), transmute(b))) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p16)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon 
instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) + assert_instr(ld4, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -20788,22 +26935,23 @@ pub unsafe fn vld4_dup_p16(a: *const p16) -> poly16x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_dup_p16(a: *const p16) -> poly16x8x4_t { - transmute(vld4q_dup_s16(transmute(a))) +pub unsafe fn vld4_lane_p16(a: *const p16, b: poly16x4x4_t) -> poly16x4x4_t { + static_assert_uimm_bits!(LANE, 2); + transmute(vld4_lane_s16::(transmute(a), transmute(b))) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p16)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) + assert_instr(ld4, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = 
"1.59.0") @@ -20812,879 +26960,746 @@ pub unsafe fn vld4q_dup_p16(a: *const p16) -> poly16x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_dup_p16(a: *const p16) -> poly16x8x4_t { - let mut ret_val: poly16x8x4_t = transmute(vld4q_dup_s16(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val -} -#[doc = "Load single 4-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg(target_arch = "arm")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub unsafe fn vld4_f16(a: *const f16) -> float16x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4f16.p0f16")] - fn _vld4_f16(ptr: *const f16, size: i32) -> float16x4x4_t; - } - _vld4_f16(a as _, 2) -} -#[doc = "Load single 4-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg(target_arch = "arm")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", 
issue = "136306")] -pub unsafe fn vld4q_f16(a: *const f16) -> float16x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8f16.p0f16")] - fn _vld4q_f16(ptr: *const f16, size: i32) -> float16x8x4_t; - } - _vld4q_f16(a as _, 2) +pub unsafe fn vld4q_lane_p16(a: *const p16, b: poly16x8x4_t) -> poly16x8x4_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld4q_lane_s16::(transmute(a), transmute(b))) } -#[doc = "Load single 4-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f16)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(not(target_arch = "arm"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) + assert_instr(nop) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub unsafe fn vld4_f16(a: *const f16) -> float16x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v4f16.p0f16" - )] - fn _vld4_f16(ptr: *const f16) -> float16x4x4_t; - } - _vld4_f16(a as _) -} -#[doc = "Load single 4-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(not(target_arch = "arm"))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = 
"arm64ec")), - assert_instr(ld4) + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub unsafe fn vld4q_f16(a: *const f16) -> float16x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v8f16.p0f16" - )] - fn _vld4q_f16(ptr: *const f16) -> float16x8x4_t; - } - _vld4q_f16(a as _) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v2f32.p0" - )] - fn _vld4_f32(ptr: *const float32x2_t) -> float32x2x4_t; - } - _vld4_f32(a as _) +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_p64(a: *const p64) -> poly64x1x4_t { + transmute(vld4_s64(transmute(a))) } #[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4q_f32(a: *const f32) -> 
float32x4x4_t { +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v4f32.p0" + link_name = "llvm.aarch64.neon.ld4.v1i64.p0" )] - fn _vld4q_f32(ptr: *const float32x4_t) -> float32x4x4_t; + fn _vld4_s64(ptr: *const int64x1_t) -> int64x1x4_t; } - _vld4q_f32(a as _) + _vld4_s64(a as _) } #[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v8i8.p0" - )] - fn _vld4_s8(ptr: *const int8x8_t) -> int8x8x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v1i64.p0")] + fn _vld4_s64(ptr: *const i8, size: i32) -> int64x1x4_t; } - _vld4_s8(a as _) + _vld4_s64(a as *const i8, 8) } #[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic 
unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v16i8.p0" - )] - fn _vld4q_s8(ptr: *const int8x16_t) -> int8x16x4_t; - } - _vld4q_s8(a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_u64(a: *const u64) -> uint64x1x4_t { + transmute(vld4_s64(transmute(a))) } #[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v4i16.p0" - )] - fn _vld4_s16(ptr: *const int16x4_t) -> int16x4x4_t; - } - _vld4_s16(a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + 
all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_u8(a: *const u8) -> uint8x8x4_t { + transmute(vld4_s8(transmute(a))) } #[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v8i16.p0" - )] - fn _vld4q_s16(ptr: *const int16x8_t) -> int16x8x4_t; - } - _vld4q_s16(a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_u8(a: *const u8) -> uint8x8x4_t { + let mut ret_val: uint8x8x4_t = transmute(vld4_s8(transmute(a))); + ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val.2 = unsafe 
{ simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val } #[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v2i32.p0" - )] - fn _vld4_s32(ptr: *const int32x2_t) -> int32x2x4_t; - } - _vld4_s32(a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4q_u8(a: *const u8) -> uint8x16x4_t { + transmute(vld4q_s8(transmute(a))) } #[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = 
"arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v4i32.p0" - )] - fn _vld4q_s32(ptr: *const int32x4_t) -> int32x4x4_t; - } - _vld4q_s32(a as _) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld4))] -pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v2f32.p0")] - fn _vld4_f32(ptr: *const i8, size: i32) -> float32x2x4_t; - } - _vld4_f32(a as *const i8, 4) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld4))] -pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4f32.p0")] - fn _vld4q_f32(ptr: *const i8, size: i32) -> float32x4x4_t; - } - _vld4q_f32(a as *const i8, 4) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s8)"] -#[doc = "## Safety"] -#[doc = " * 
Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld4))] -pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8i8.p0")] - fn _vld4_s8(ptr: *const i8, size: i32) -> int8x8x4_t; - } - _vld4_s8(a as *const i8, 1) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld4))] -pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v16i8.p0")] - fn _vld4q_s8(ptr: *const i8, size: i32) -> int8x16x4_t; - } - _vld4q_s8(a as *const i8, 1) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4q_u8(a: *const u8) -> uint8x16x4_t { + let mut ret_val: uint8x16x4_t = transmute(vld4q_s8(transmute(a))); + ret_val.0 = unsafe { + simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + }; + ret_val.1 = unsafe { + simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + }; + ret_val.2 
= unsafe { + simd_shuffle!( + ret_val.2, + ret_val.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + }; + ret_val.3 = unsafe { + simd_shuffle!( + ret_val.3, + ret_val.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + }; + ret_val } #[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld4))] -pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4i16.p0")] - fn _vld4_s16(ptr: *const i8, size: i32) -> int16x4x4_t; - } - _vld4_s16(a as *const i8, 2) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_u16(a: *const u16) -> uint16x4x4_t { + transmute(vld4_s16(transmute(a))) } #[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u16)"] #[doc = "## Safety"] #[doc = " * Neon 
instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld4))] -pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8i16.p0")] - fn _vld4q_s16(ptr: *const i8, size: i32) -> int16x8x4_t; - } - _vld4q_s16(a as *const i8, 2) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_u16(a: *const u16) -> uint16x4x4_t { + let mut ret_val: uint16x4x4_t = transmute(vld4_s16(transmute(a))); + ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]) }; + ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]) }; + ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]) }; + ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]) }; + ret_val } #[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld4))] -pub unsafe fn vld4_s32(a: *const i32) 
-> int32x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v2i32.p0")] - fn _vld4_s32(ptr: *const i8, size: i32) -> int32x2x4_t; - } - _vld4_s32(a as *const i8, 4) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4q_u16(a: *const u16) -> uint16x8x4_t { + transmute(vld4q_s16(transmute(a))) } #[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld4))] -pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4i32.p0")] - fn _vld4q_s32(ptr: *const i8, size: i32) -> int32x4x4_t; - } - _vld4q_s32(a as *const i8, 4) -} -#[doc = "Load multiple 4-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[target_feature(enable = "neon,fp16")] 
-#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub unsafe fn vld4_lane_f16(a: *const f16, b: float16x4x4_t) -> float16x4x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4f16.p0")] - fn _vld4_lane_f16( - ptr: *const f16, - a: float16x4_t, - b: float16x4_t, - c: float16x4_t, - d: float16x4_t, - n: i32, - size: i32, - ) -> float16x4x4_t; - } - _vld4_lane_f16(a as _, b.0, b.1, b.2, b.3, LANE, 2) -} -#[doc = "Load multiple 4-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub unsafe fn vld4q_lane_f16(a: *const f16, b: float16x8x4_t) -> float16x8x4_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8f16.p0")] - fn _vld4q_lane_f16( - ptr: *const f16, - a: float16x8_t, - b: float16x8_t, - c: float16x8_t, - d: float16x8_t, - n: i32, - size: i32, - ) -> float16x8x4_t; - } - _vld4q_lane_f16(a as _, b.0, b.1, b.2, b.3, LANE, 2) -} -#[doc = "Load multiple 4-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(not(target_arch = "arm"))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) + assert_instr(ld4) )] -#[rustc_legacy_const_generics(2)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub unsafe fn vld4_lane_f16(a: *const f16, b: float16x4x4_t) -> float16x4x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v4f16.p0" - )] - fn _vld4_lane_f16( - a: float16x4_t, - b: float16x4_t, - c: float16x4_t, - d: float16x4_t, - n: i64, - ptr: *const f16, - ) -> float16x4x4_t; - } - _vld4_lane_f16(b.0, b.1, b.2, b.3, LANE as i64, a as _) +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4q_u16(a: *const u16) -> uint16x8x4_t { + let mut ret_val: uint16x8x4_t = transmute(vld4q_s16(transmute(a))); + ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val } -#[doc = "Load multiple 4-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f16)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(not(target_arch 
= "arm"))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) + assert_instr(ld4) )] -#[rustc_legacy_const_generics(2)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub unsafe fn vld4q_lane_f16(a: *const f16, b: float16x8x4_t) -> float16x8x4_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v8f16.p0" - )] - fn _vld4q_lane_f16( - a: float16x8_t, - b: float16x8_t, - c: float16x8_t, - d: float16x8_t, - n: i64, - ptr: *const f16, - ) -> float16x8x4_t; - } - _vld4q_lane_f16(b.0, b.1, b.2, b.3, LANE as i64, a as _) +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_u32(a: *const u32) -> uint32x2x4_t { + transmute(vld4_s32(transmute(a))) } #[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> float32x2x4_t { - static_assert_uimm_bits!(LANE, 1); - unsafe 
extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v2f32.p0" - )] - fn _vld4_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - d: float32x2_t, - n: i64, - ptr: *const i8, - ) -> float32x2x4_t; - } - _vld4_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_u32(a: *const u32) -> uint32x2x4_t { + let mut ret_val: uint32x2x4_t = transmute(vld4_s32(transmute(a))); + ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) }; + ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) }; + ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) }; + ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) }; + ret_val } #[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) -> float32x4x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - 
any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v4f32.p0" - )] - fn _vld4q_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - d: float32x4_t, - n: i64, - ptr: *const i8, - ) -> float32x4x4_t; - } - _vld4q_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4q_u32(a: *const u32) -> uint32x4x4_t { + transmute(vld4q_s32(transmute(a))) } #[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8x8x4_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v8i8.p0" - )] - fn _vld4_lane_s8( - a: int8x8_t, - b: int8x8_t, - c: int8x8_t, - d: int8x8_t, - n: i64, - ptr: *const i8, - ) -> int8x8x4_t; - } - _vld4_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4q_u32(a: *const u32) -> uint32x4x4_t { + let mut ret_val: uint32x4x4_t = transmute(vld4q_s32(transmute(a))); + ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]) }; + ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]) }; + ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]) }; + ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]) }; + ret_val } #[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> int16x4x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v4i16.p0" - )] - fn _vld4_lane_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, - d: int16x4_t, - n: i64, - ptr: *const i8, - ) -> int16x4x4_t; - } - _vld4_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = 
"arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_p8(a: *const p8) -> poly8x8x4_t { + transmute(vld4_s8(transmute(a))) } #[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> int16x8x4_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v8i16.p0" - )] - fn _vld4q_lane_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, - d: int16x8_t, - n: i64, - ptr: *const i8, - ) -> int16x8x4_t; - } - _vld4q_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_p8(a: *const p8) -> 
poly8x8x4_t { + let mut ret_val: poly8x8x4_t = transmute(vld4_s8(transmute(a))); + ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val } #[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> int32x2x4_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v2i32.p0" - )] - fn _vld4_lane_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, - d: int32x2_t, - n: i64, - ptr: *const i8, - ) -> int32x2x4_t; - } - _vld4_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4q_p8(a: *const 
p8) -> poly8x16x4_t { + transmute(vld4q_s8(transmute(a))) } #[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> int32x4x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v4i32.p0" - )] - fn _vld4q_lane_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, - d: int32x4_t, - n: i64, - ptr: *const i8, - ) -> int32x4x4_t; - } - _vld4q_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4q_p8(a: *const p8) -> poly8x16x4_t { + let mut ret_val: poly8x16x4_t = transmute(vld4q_s8(transmute(a))); + ret_val.0 = unsafe { + simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + }; + ret_val.1 = unsafe { + simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + }; + ret_val.2 = unsafe { + 
simd_shuffle!( + ret_val.2, + ret_val.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + }; + ret_val.3 = unsafe { + simd_shuffle!( + ret_val.3, + ret_val.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + }; + ret_val } #[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> float32x2x4_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2f32.p0")] - fn _vld4_lane_f32( - ptr: *const i8, - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - d: float32x2_t, - n: i32, - size: i32, - ) -> float32x2x4_t; - } - _vld4_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_p16(a: *const p16) -> poly16x4x4_t { + transmute(vld4_s16(transmute(a))) } #[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) -> float32x4x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4f32.p0")] - fn _vld4q_lane_f32( - ptr: *const i8, - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - d: float32x4_t, - n: i32, - size: i32, - ) -> float32x4x4_t; - } - _vld4q_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_p16(a: *const p16) -> poly16x4x4_t { + let mut ret_val: poly16x4x4_t = transmute(vld4_s16(transmute(a))); + ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]) }; + ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]) }; + ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]) }; + ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]) }; + ret_val } #[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8x8x4_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i8.p0")] - fn _vld4_lane_s8( - ptr: *const i8, - a: int8x8_t, - b: int8x8_t, - c: int8x8_t, - d: int8x8_t, - n: i32, - size: i32, - ) -> int8x8x4_t; - } - _vld4_lane_s8(a as _, b.0, b.1, b.2, b.3, LANE, 1) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4q_p16(a: *const p16) -> poly16x8x4_t { + transmute(vld4q_s16(transmute(a))) } #[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, 
assert_instr(vld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> int16x4x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4i16.p0")] - fn _vld4_lane_s16( - ptr: *const i8, - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, - d: int16x4_t, - n: i32, - size: i32, - ) -> int16x4x4_t; - } - _vld4_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4q_p16(a: *const p16) -> poly16x8x4_t { + let mut ret_val: poly16x8x4_t = transmute(vld4q_s16(transmute(a))); + ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s16)"] +#[doc = "Store SIMD&FP register (immediate offset)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldrq_p128)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] 
-#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> int16x8x4_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i16.p0")] - fn _vld4q_lane_s16( - ptr: *const i8, - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, - d: int16x8_t, - n: i32, - size: i32, - ) -> int16x8x4_t; - } - _vld4q_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vldrq_p128(a: *const p128) -> p128 { + *a } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f16)"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> int32x2x4_t { - static_assert_uimm_bits!(LANE, 1); +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] 
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmax) +)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2i32.p0")] - fn _vld4_lane_s32( - ptr: *const i8, - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, - d: int32x2_t, - n: i32, - size: i32, - ) -> int32x2x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4f16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmax.v4f16" + )] + fn _vmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t; } - _vld4_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4) + unsafe { _vmax_f16(a, b) } } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f16)"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> int32x4x4_t { - static_assert_uimm_bits!(LANE, 2); +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmax) +)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", 
issue = "136306")] +pub fn vmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4i32.p0")] - fn _vld4q_lane_s32( - ptr: *const i8, - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, - d: int32x4_t, - n: i32, - size: i32, - ) -> int32x4x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v8f16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmax.v8f16" + )] + fn _vmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t; } - _vld4q_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4) + unsafe { _vmaxq_f16(a, b) } } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) + assert_instr(fmax) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -21693,23 +27708,27 @@ pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_lane_u8(a: *const u8, b: uint8x8x4_t) -> uint8x8x4_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld4_lane_s8::(transmute(a), transmute(b))) +pub fn vmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { 
+ unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmax.v2f32" + )] + fn _vmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + unsafe { _vmax_f32(a, b) } } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) + assert_instr(fmax) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -21718,23 +27737,27 @@ pub unsafe fn vld4_lane_u8(a: *const u8, b: uint8x8x4_t) -> uin target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_lane_u16(a: *const u16, b: uint16x4x4_t) -> uint16x4x4_t { - static_assert_uimm_bits!(LANE, 2); - transmute(vld4_lane_s16::(transmute(a), transmute(b))) +pub fn vmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmax.v4f32" + )] + fn _vmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + unsafe { _vmaxq_f32(a, b) } } -#[doc = "Load multiple 
4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) + assert_instr(smax) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -21743,23 +27766,27 @@ pub unsafe fn vld4_lane_u16(a: *const u16, b: uint16x4x4_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_lane_u16(a: *const u16, b: uint16x8x4_t) -> uint16x8x4_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld4q_lane_s16::(transmute(a), transmute(b))) +pub fn vmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smax.v8i8" + )] + fn _vmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + unsafe { _vmax_s8(a, b) } } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s8)"] #[inline] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) + assert_instr(smax) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -21768,23 +27795,27 @@ pub unsafe fn vld4q_lane_u16(a: *const u16, b: uint16x8x4_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_lane_u32(a: *const u32, b: uint32x2x4_t) -> uint32x2x4_t { - static_assert_uimm_bits!(LANE, 1); - transmute(vld4_lane_s32::(transmute(a), transmute(b))) +pub fn vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smax.v16i8" + )] + fn _vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + unsafe { _vmaxq_s8(a, b) } } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) + assert_instr(smax) )] 
-#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -21793,23 +27824,27 @@ pub unsafe fn vld4_lane_u32(a: *const u32, b: uint32x2x4_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_lane_u32(a: *const u32, b: uint32x4x4_t) -> uint32x4x4_t { - static_assert_uimm_bits!(LANE, 2); - transmute(vld4q_lane_s32::(transmute(a), transmute(b))) +pub fn vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smax.v4i16" + )] + fn _vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + unsafe { _vmax_s16(a, b) } } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) + assert_instr(smax) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -21818,23 +27853,27 @@ pub unsafe fn vld4q_lane_u32(a: *const u32, b: uint32x4x4_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_lane_p8(a: *const p8, b: poly8x8x4_t) -> 
poly8x8x4_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld4_lane_s8::(transmute(a), transmute(b))) +pub fn vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smax.v8i16" + )] + fn _vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + unsafe { _vmaxq_s16(a, b) } } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) + assert_instr(smax) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -21843,23 +27882,27 @@ pub unsafe fn vld4_lane_p8(a: *const p8, b: poly8x8x4_t) -> pol target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_lane_p16(a: *const p16, b: poly16x4x4_t) -> poly16x4x4_t { - static_assert_uimm_bits!(LANE, 2); - transmute(vld4_lane_s16::(transmute(a), transmute(b))) +pub fn vmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = 
"llvm.aarch64.neon.smax.v2i32" + )] + fn _vmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + unsafe { _vmax_s32(a, b) } } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) + assert_instr(smax) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -21868,21 +27911,26 @@ pub unsafe fn vld4_lane_p16(a: *const p16, b: poly16x4x4_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_lane_p16(a: *const p16, b: poly16x8x4_t) -> poly16x8x4_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld4q_lane_s16::(transmute(a), transmute(b))) +pub fn vmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smax.v4i32" + )] + fn _vmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + unsafe { _vmaxq_s32(a, b) } } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] 
+#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u8)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(umax) )] #[cfg_attr( not(target_arch = "arm"), @@ -21892,55 +27940,26 @@ pub unsafe fn vld4q_lane_p16(a: *const p16, b: poly16x8x4_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_p64(a: *const p64) -> poly64x1x4_t { - transmute(vld4_s64(transmute(a))) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { +pub fn vmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v1i64.p0" + link_name = "llvm.aarch64.neon.umax.v8i8" )] - fn _vld4_s64(ptr: *const int64x1_t) -> int64x1x4_t; - } - _vld4_s64(a as _) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s64)"] -#[doc = "## Safety"] 
-#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v1i64.p0")] - fn _vld4_s64(ptr: *const i8, size: i32) -> int64x1x4_t; + fn _vmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; } - _vld4_s64(a as *const i8, 8) + unsafe { _vmax_u8(a, b) } } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(umax) )] #[cfg_attr( not(target_arch = "arm"), @@ -21950,21 +27969,26 @@ pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_u64(a: *const u64) -> uint64x1x4_t { - transmute(vld4_s64(transmute(a))) +pub fn vmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umax.v16i8" + )] + fn _vmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; + } + unsafe { _vmaxq_u8(a, b) } } 
-#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) + assert_instr(umax) )] #[cfg_attr( not(target_arch = "arm"), @@ -21974,21 +27998,26 @@ pub unsafe fn vld4_u64(a: *const u64) -> uint64x1x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_u8(a: *const u8) -> uint8x8x4_t { - transmute(vld4_s8(transmute(a))) +pub fn vmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umax.v4i16" + )] + fn _vmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; + } + unsafe { _vmax_u16(a, b) } } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) + assert_instr(umax) )] #[cfg_attr( not(target_arch = "arm"), @@ -21998,26 +28027,26 @@ pub unsafe fn vld4_u8(a: *const u8) -> uint8x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_u8(a: *const u8) -> uint8x8x4_t { - let mut ret_val: uint8x8x4_t = transmute(vld4_s8(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val +pub fn vmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umax.v8i16" + )] + fn _vmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; + } + unsafe { _vmaxq_u16(a, b) } } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch 
= "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) + assert_instr(umax) )] #[cfg_attr( not(target_arch = "arm"), @@ -22027,21 +28056,26 @@ pub unsafe fn vld4_u8(a: *const u8) -> uint8x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_u8(a: *const u8) -> uint8x16x4_t { - transmute(vld4q_s8(transmute(a))) +pub fn vmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umax.v2i32" + )] + fn _vmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; + } + unsafe { _vmax_u32(a, b) } } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) + assert_instr(umax) )] #[cfg_attr( not(target_arch = "arm"), @@ -22051,50 +28085,70 @@ pub unsafe fn vld4q_u8(a: *const u8) -> uint8x16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_u8(a: *const u8) -> uint8x16x4_t { - let mut ret_val: uint8x16x4_t = transmute(vld4q_s8(transmute(a))); - ret_val.0 = unsafe { - simd_shuffle!( - ret_val.0, - ret_val.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 
0] - ) - }; - ret_val.1 = unsafe { - simd_shuffle!( - ret_val.1, - ret_val.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val.2 = unsafe { - simd_shuffle!( - ret_val.2, - ret_val.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val.3 = unsafe { - simd_shuffle!( - ret_val.3, - ret_val.3, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val +pub fn vmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umax.v4i32" + )] + fn _vmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; + } + unsafe { _vmaxq_u32(a, b) } } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Floating-point Maximum Number (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmaxnm) +)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v4f16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnm.v4f16" + )] + fn _vmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t; + } + unsafe { _vmaxnm_f16(a, b) } +} +#[doc = "Floating-point 
Maximum Number (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmaxnm) +)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v8f16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnm.v8f16" + )] + fn _vmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t; + } + unsafe { _vmaxnmq_f16(a, b) } +} +#[doc = "Floating-point Maximum Number (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) + assert_instr(fmaxnm) )] #[cfg_attr( not(target_arch = "arm"), @@ -22104,21 +28158,26 @@ pub unsafe fn vld4q_u8(a: *const u8) -> uint8x16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_u16(a: *const u16) -> uint16x4x4_t { - transmute(vld4_s16(transmute(a))) +pub fn vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = 
"llvm.arm.neon.vmaxnm.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnm.v2f32" + )] + fn _vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + unsafe { _vmaxnm_f32(a, b) } } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Floating-point Maximum Number (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) + assert_instr(fmaxnm) )] #[cfg_attr( not(target_arch = "arm"), @@ -22128,26 +28187,70 @@ pub unsafe fn vld4_u16(a: *const u16) -> uint16x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_u16(a: *const u16) -> uint16x4x4_t { - let mut ret_val: uint16x4x4_t = transmute(vld4_s16(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]) }; - ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]) }; - ret_val +pub fn vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v4f32")] + #[cfg_attr( + any(target_arch = 
"aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnm.v4f32" + )] + fn _vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + unsafe { _vmaxnmq_f32(a, b) } } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmin) +)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4f16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmin.v4f16" + )] + fn _vmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t; + } + unsafe { _vmin_f16(a, b) } +} +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmin) +)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v8f16")] + #[cfg_attr( + 
any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmin.v8f16" + )] + fn _vminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t; + } + unsafe { _vminq_f16(a, b) } +} +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) + assert_instr(fmin) )] #[cfg_attr( not(target_arch = "arm"), @@ -22157,21 +28260,26 @@ pub unsafe fn vld4_u16(a: *const u16) -> uint16x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_u16(a: *const u16) -> uint16x8x4_t { - transmute(vld4q_s16(transmute(a))) +pub fn vmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmin.v2f32" + )] + fn _vmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + unsafe { _vmin_f32(a, b) } } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = 
"arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) + assert_instr(fmin) )] #[cfg_attr( not(target_arch = "arm"), @@ -22181,26 +28289,26 @@ pub unsafe fn vld4q_u16(a: *const u16) -> uint16x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_u16(a: *const u16) -> uint16x8x4_t { - let mut ret_val: uint16x8x4_t = transmute(vld4q_s16(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val +pub fn vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmin.v4f32" + )] + fn _vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + unsafe { _vminq_f32(a, b) } } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, 
any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) + assert_instr(smin) )] #[cfg_attr( not(target_arch = "arm"), @@ -22210,21 +28318,26 @@ pub unsafe fn vld4q_u16(a: *const u16) -> uint16x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_u32(a: *const u32) -> uint32x2x4_t { - transmute(vld4_s32(transmute(a))) +pub fn vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smin.v8i8" + )] + fn _vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + unsafe { _vmin_s8(a, b) } } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) + assert_instr(smin) )] #[cfg_attr( not(target_arch = "arm"), @@ -22234,26 +28347,26 @@ pub unsafe fn vld4_u32(a: *const u32) -> uint32x2x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_u32(a: *const u32) -> uint32x2x4_t { - let mut ret_val: uint32x2x4_t = transmute(vld4_s32(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) }; - ret_val.1 = unsafe { 
simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) }; - ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) }; - ret_val +pub fn vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smin.v16i8" + )] + fn _vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + unsafe { _vminq_s8(a, b) } } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) + assert_instr(smin) )] #[cfg_attr( not(target_arch = "arm"), @@ -22263,21 +28376,26 @@ pub unsafe fn vld4_u32(a: *const u32) -> uint32x2x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_u32(a: *const u32) -> uint32x4x4_t { - transmute(vld4q_s32(transmute(a))) +pub fn vmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smin.v4i16" + )] + fn _vmin_s16(a: int16x4_t, b: int16x4_t) -> 
int16x4_t; + } + unsafe { _vmin_s16(a, b) } } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) + assert_instr(smin) )] #[cfg_attr( not(target_arch = "arm"), @@ -22287,26 +28405,26 @@ pub unsafe fn vld4q_u32(a: *const u32) -> uint32x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_u32(a: *const u32) -> uint32x4x4_t { - let mut ret_val: uint32x4x4_t = transmute(vld4q_s32(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]) }; - ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]) }; - ret_val +pub fn vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smin.v8i16" + )] + fn _vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + unsafe { _vminq_s16(a, b) } } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) + assert_instr(smin) )] #[cfg_attr( not(target_arch = "arm"), @@ -22316,21 +28434,26 @@ pub unsafe fn vld4q_u32(a: *const u32) -> uint32x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_p8(a: *const p8) -> poly8x8x4_t { - transmute(vld4_s8(transmute(a))) +pub fn vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smin.v2i32" + )] + fn _vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + unsafe { _vmin_s32(a, b) } } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) + assert_instr(smin) )] #[cfg_attr( not(target_arch = "arm"), @@ -22340,26 +28463,26 @@ pub unsafe fn vld4_p8(a: *const p8) -> poly8x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_p8(a: *const p8) -> poly8x8x4_t { - let mut ret_val: poly8x8x4_t = transmute(vld4_s8(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val +pub fn vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smin.v4i32" + )] + fn _vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + unsafe { _vminq_s32(a, b) } } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) + assert_instr(umin) )] 
#[cfg_attr( not(target_arch = "arm"), @@ -22369,21 +28492,26 @@ pub unsafe fn vld4_p8(a: *const p8) -> poly8x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_p8(a: *const p8) -> poly8x16x4_t { - transmute(vld4q_s8(transmute(a))) +pub fn vmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umin.v8i8" + )] + fn _vmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; + } + unsafe { _vmin_u8(a, b) } } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) + assert_instr(umin) )] #[cfg_attr( not(target_arch = "arm"), @@ -22393,50 +28521,26 @@ pub unsafe fn vld4q_p8(a: *const p8) -> poly8x16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_p8(a: *const p8) -> poly8x16x4_t { - let mut ret_val: poly8x16x4_t = transmute(vld4q_s8(transmute(a))); - ret_val.0 = unsafe { - simd_shuffle!( - ret_val.0, - ret_val.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val.1 = unsafe { - simd_shuffle!( - ret_val.1, - ret_val.1, - [15, 14, 13, 12, 
11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val.2 = unsafe { - simd_shuffle!( - ret_val.2, - ret_val.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val.3 = unsafe { - simd_shuffle!( - ret_val.3, - ret_val.3, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val +pub fn vminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umin.v16i8" + )] + fn _vminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; + } + unsafe { _vminq_u8(a, b) } } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) + assert_instr(umin) )] #[cfg_attr( not(target_arch = "arm"), @@ -22446,21 +28550,26 @@ pub unsafe fn vld4q_p8(a: *const p8) -> poly8x16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_p16(a: *const p16) -> poly16x4x4_t { - transmute(vld4_s16(transmute(a))) +pub fn vmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", 
target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umin.v4i16" + )] + fn _vmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; + } + unsafe { _vmin_u16(a, b) } } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) + assert_instr(umin) )] #[cfg_attr( not(target_arch = "arm"), @@ -22470,26 +28579,26 @@ pub unsafe fn vld4_p16(a: *const p16) -> poly16x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_p16(a: *const p16) -> poly16x4x4_t { - let mut ret_val: poly16x4x4_t = transmute(vld4_s16(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]) }; - ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]) }; - ret_val +pub fn vminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umin.v8i16" + )] + fn _vminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; + } + unsafe { _vminq_u16(a, b) } } -#[doc = "Load 
multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) + assert_instr(umin) )] #[cfg_attr( not(target_arch = "arm"), @@ -22499,21 +28608,26 @@ pub unsafe fn vld4_p16(a: *const p16) -> poly16x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_p16(a: *const p16) -> poly16x8x4_t { - transmute(vld4q_s16(transmute(a))) +pub fn vmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umin.v2i32" + )] + fn _vmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; + } + unsafe { _vmin_u32(a, b) } } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) + assert_instr(umin) )] #[cfg_attr( not(target_arch = "arm"), @@ -22523,96 +28637,70 @@ pub unsafe fn vld4q_p16(a: *const p16) -> poly16x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_p16(a: *const p16) -> poly16x8x4_t { - let mut ret_val: poly16x8x4_t = transmute(vld4q_s16(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val -} -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmax) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { +pub fn vminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4f16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmax.v4f16" + link_name = "llvm.aarch64.neon.umin.v4i32" )] - fn _vmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t; + fn _vminq_u32(a: uint32x4_t, b: 
uint32x4_t) -> uint32x4_t; } - unsafe { _vmax_f16(a, b) } + unsafe { _vminq_u32(a, b) } } -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f16)"] +#[doc = "Floating-point Minimum Number (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f16)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmax) + assert_instr(fminnm) )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { +pub fn vminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v8f16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v4f16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmax.v8f16" + link_name = "llvm.aarch64.neon.fminnm.v4f16" )] - fn _vmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t; + fn _vminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t; } - unsafe { _vmaxq_f16(a, b) } + unsafe { _vminnm_f16(a, b) } } -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f32)"] +#[doc = "Floating-point Minimum Number (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmax) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + assert_instr(fminnm) )] -pub fn vmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v2f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v8f16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmax.v2f32" + link_name = "llvm.aarch64.neon.fminnm.v8f16" )] - fn _vmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + fn _vminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t; } - unsafe { _vmax_f32(a, b) } + unsafe { _vminnmq_f16(a, b) } } -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f32)"] +#[doc = "Floating-point Minimum Number (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f32)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))] #[cfg_attr( all(test, any(target_arch = "aarch64", 
target_arch = "arm64ec")), - assert_instr(fmax) + assert_instr(fminnm) )] #[cfg_attr( not(target_arch = "arm"), @@ -22622,26 +28710,26 @@ pub fn vmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { +pub fn vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmax.v4f32" + link_name = "llvm.aarch64.neon.fminnm.v2f32" )] - fn _vmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + fn _vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; } - unsafe { _vmaxq_f32(a, b) } + unsafe { _vminnm_f32(a, b) } } -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s8)"] +#[doc = "Floating-point Minimum Number (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f32)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smax) + assert_instr(fminnm) )] #[cfg_attr( not(target_arch = "arm"), @@ -22651,26 +28739,26 @@ pub fn vmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { +pub 
fn vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v8i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smax.v8i8" + link_name = "llvm.aarch64.neon.fminnm.v4f32" )] - fn _vmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + fn _vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; } - unsafe { _vmax_s8(a, b) } + unsafe { _vminnmq_f32(a, b) } } -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s8)"] +#[doc = "Floating-point multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smax) + assert_instr(fmul) )] #[cfg_attr( not(target_arch = "arm"), @@ -22680,26 +28768,18 @@ pub fn vmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smax.v16i8" - )] - fn _vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - unsafe { _vmaxq_s8(a, b) } +pub fn vmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { + unsafe { simd_add(a, simd_mul(b, c)) } } -#[doc = "Maximum 
(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s16)"] +#[doc = "Floating-point multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smax) + assert_instr(fmul) )] #[cfg_attr( not(target_arch = "arm"), @@ -22709,27 +28789,20 @@ pub fn vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smax.v4i16" - )] - fn _vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - unsafe { _vmax_s16(a, b) } +pub fn vmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { + unsafe { simd_add(a, simd_mul(b, c)) } } -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s16)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", 
target_arch = "arm64ec")), - assert_instr(smax) + assert_instr(fmul, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -22738,27 +28811,25 @@ pub fn vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smax.v8i16" - )] - fn _vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - unsafe { _vmaxq_s16(a, b) } +pub fn vmla_lane_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE, 1); + unsafe { vmla_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } } -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s32)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smax) + assert_instr(fmul, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -22767,27 +28838,25 @@ pub fn vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn 
vmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smax.v2i32" - )] - fn _vmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - unsafe { _vmax_s32(a, b) } +pub fn vmla_laneq_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x4_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { vmla_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } } -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s32)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smax) + assert_instr(fmul, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -22796,27 +28865,31 @@ pub fn vmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smax.v4i32" - )] - fn _vmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; +pub fn vmlaq_lane_f32( + a: float32x4_t, + b: float32x4_t, + c: 
float32x2_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE, 1); + unsafe { + vmlaq_f32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } - unsafe { _vmaxq_s32(a, b) } } -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u8)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umax) + assert_instr(fmul, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -22825,27 +28898,31 @@ pub fn vmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umax.v8i8" - )] - fn _vmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; +pub fn vmlaq_laneq_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { + vmlaq_f32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } - unsafe { _vmax_u8(a, b) } } -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u8)"] +#[doc = 
"Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umax) + assert_instr(mla, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -22854,27 +28931,27 @@ pub fn vmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umax.v16i8" - )] - fn _vmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; +pub fn vmla_lane_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { + vmla_s16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } - unsafe { _vmaxq_u8(a, b) } } -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u16)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vmla.i16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umax) + assert_instr(mla, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -22883,27 +28960,27 @@ pub fn vmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umax.v4i16" - )] - fn _vmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; +pub fn vmla_lane_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { + vmla_u16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } - unsafe { _vmax_u16(a, b) } } -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u16)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umax) + assert_instr(mla, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -22912,27 +28989,27 @@ pub fn vmax_u16(a: uint16x4_t, b: uint16x4_t) -> 
uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umax.v8i16" - )] - fn _vmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; +pub fn vmla_laneq_s16(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t { + static_assert_uimm_bits!(LANE, 3); + unsafe { + vmla_s16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } - unsafe { _vmaxq_u16(a, b) } } -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u32)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umax) + assert_instr(mla, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -22941,27 +29018,27 @@ pub fn vmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = 
"llvm.aarch64.neon.umax.v2i32" - )] - fn _vmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; +pub fn vmla_laneq_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x8_t) -> uint16x4_t { + static_assert_uimm_bits!(LANE, 3); + unsafe { + vmla_u16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } - unsafe { _vmax_u32(a, b) } } -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u32)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umax) + assert_instr(mla, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -22970,71 +29047,82 @@ pub fn vmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umax.v4i32" - )] - fn _vmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; +pub fn vmlaq_lane_s16(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { + vmlaq_s16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + 
LANE as u32, + LANE as u32 + ] + ), + ) } - unsafe { _vmaxq_u32(a, b) } } -#[doc = "Floating-point Maximum Number (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f16)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_u16)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmaxnm) + assert_instr(mla, LANE = 1) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v4f16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnm.v4f16" - )] - fn _vmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t; - } - unsafe { _vmaxnm_f16(a, b) } -} -#[doc = "Floating-point Maximum Number (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))] +#[rustc_legacy_const_generics(3)] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmaxnm) + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = 
"stdarch_neon_f16", issue = "136306")] -pub fn vmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v8f16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnm.v8f16" - )] - fn _vmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t; +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vmlaq_lane_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x4_t) -> uint16x8_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { + vmlaq_u16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) } - unsafe { _vmaxnmq_f16(a, b) } } -#[doc = "Floating-point Maximum Number (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f32)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_s16)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmaxnm) + assert_instr(mla, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23043,27 +29131,40 @@ pub fn vmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn 
vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnm.v2f32" - )] - fn _vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; +pub fn vmlaq_laneq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(LANE, 3); + unsafe { + vmlaq_s16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) } - unsafe { _vmaxnm_f32(a, b) } } -#[doc = "Floating-point Maximum Number (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f32)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_u16)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmaxnm) + assert_instr(mla, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23072,71 +29173,63 @@ pub fn vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v4f32")] - 
#[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnm.v4f32" - )] - fn _vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; +pub fn vmlaq_laneq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(LANE, 3); + unsafe { + vmlaq_u16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) } - unsafe { _vmaxnmq_f32(a, b) } } -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f16)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_s32)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmin) + assert_instr(mla, LANE = 1) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4f16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmin.v4f16" - )] - fn _vmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t; - } - unsafe { _vmin_f16(a, b) } -} -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", 
target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[rustc_legacy_const_generics(3)] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmin) + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v8f16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmin.v8f16" - )] - fn _vminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t; - } - unsafe { _vminq_f16(a, b) } +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vmla_lane_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + unsafe { vmla_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } } -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f32)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmin) + assert_instr(mla, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23145,27 +29238,21 @@ pub fn vminq_f16(a: 
float16x8_t, b: float16x8_t) -> float16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmin.v2f32" - )] - fn _vmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - unsafe { _vmin_f32(a, b) } +pub fn vmla_lane_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 1); + unsafe { vmla_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } } -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f32)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmin) + assert_instr(mla, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23174,27 +29261,21 @@ pub fn vmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = 
"llvm.aarch64.neon.fmin.v4f32" - )] - fn _vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - unsafe { _vminq_f32(a, b) } +pub fn vmla_laneq_s32(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { vmla_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } } -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s8)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smin) + assert_instr(mla, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23203,27 +29284,21 @@ pub fn vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smin.v8i8" - )] - fn _vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - unsafe { _vmin_s8(a, b) } +pub fn vmla_laneq_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x4_t) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { vmla_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } } -#[doc = "Minimum (vector)"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s8)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smin) + assert_instr(mla, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23232,27 +29307,27 @@ pub fn vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smin.v16i8" - )] - fn _vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; +pub fn vmlaq_lane_s32(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 1); + unsafe { + vmlaq_s32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } - unsafe { _vminq_s8(a, b) } } -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s16)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smin) + assert_instr(mla, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23261,27 +29336,27 @@ pub fn vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smin.v4i16" - )] - fn _vmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; +pub fn vmlaq_lane_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x2_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 1); + unsafe { + vmlaq_u32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } - unsafe { _vmin_s16(a, b) } } -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s16)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smin) + assert_instr(mla, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = 
"neon_intrinsics", since = "1.59.0") @@ -23290,27 +29365,27 @@ pub fn vmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smin.v8i16" - )] - fn _vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; +pub fn vmlaq_laneq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { + vmlaq_s32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } - unsafe { _vminq_s16(a, b) } } -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s32)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smin) + assert_instr(mla, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23319,26 +29394,25 @@ pub fn vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v2i32")] - 
#[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smin.v2i32" - )] - fn _vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; +pub fn vmlaq_laneq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { + vmlaq_u32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } - unsafe { _vmin_s32(a, b) } -} -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s32)"] +} +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smin) + assert_instr(fmul) )] #[cfg_attr( not(target_arch = "arm"), @@ -23348,26 +29422,18 @@ pub fn vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smin.v4i32" - )] - fn _vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - unsafe { _vminq_s32(a, b) } +pub fn vmla_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { + vmla_f32(a, b, vdup_n_f32(c)) } -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u8)"] +#[doc = "Vector multiply 
accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umin) + assert_instr(fmul) )] #[cfg_attr( not(target_arch = "arm"), @@ -23377,26 +29443,18 @@ pub fn vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umin.v8i8" - )] - fn _vmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - } - unsafe { _vmin_u8(a, b) } +pub fn vmlaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { + vmlaq_f32(a, b, vdupq_n_f32(c)) } -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u8)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umin) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -23406,26 +29464,18 @@ pub fn vmin_u8(a: uint8x8_t, b: uint8x8_t) -> 
uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umin.v16i8" - )] - fn _vminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - } - unsafe { _vminq_u8(a, b) } +pub fn vmla_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { + vmla_s16(a, b, vdup_n_s16(c)) } -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u16)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umin) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -23435,26 +29485,18 @@ pub fn vminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umin.v4i16" - )] - fn _vmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - } - unsafe { _vmin_u16(a, b) } +pub fn vmlaq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { + vmlaq_s16(a, b, vdupq_n_s16(c)) } -#[doc = "Minimum 
(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u16)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umin) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -23464,26 +29506,18 @@ pub fn vmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umin.v8i16" - )] - fn _vminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - } - unsafe { _vminq_u16(a, b) } +pub fn vmla_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { + vmla_u16(a, b, vdup_n_u16(c)) } -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u32)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - 
assert_instr(umin) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -23493,26 +29527,18 @@ pub fn vminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umin.v2i32" - )] - fn _vmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - } - unsafe { _vmin_u32(a, b) } +pub fn vmlaq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { + vmlaq_u16(a, b, vdupq_n_u16(c)) } -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u32)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umin) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -23522,70 +29548,18 @@ pub fn vmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umin.v4i32" - )] - fn _vminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; 
- } - unsafe { _vminq_u32(a, b) } -} -#[doc = "Floating-point Minimum Number (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fminnm) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v4f16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnm.v4f16" - )] - fn _vminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t; - } - unsafe { _vminnm_f16(a, b) } -} -#[doc = "Floating-point Minimum Number (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fminnm) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v8f16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnm.v8f16" - )] - fn _vminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t; - } - unsafe { _vminnmq_f16(a, b) } +pub fn vmla_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { + vmla_s32(a, b, vdup_n_s32(c)) } 
-#[doc = "Floating-point Minimum Number (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f32)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_s32)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fminnm) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -23595,26 +29569,18 @@ pub fn vminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnm.v2f32" - )] - fn _vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - unsafe { _vminnm_f32(a, b) } +pub fn vmlaq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { + vmlaq_s32(a, b, vdupq_n_s32(c)) } -#[doc = "Floating-point Minimum Number (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f32)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_u32)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] -#[cfg_attr(all(test, target_arch = 
"arm"), assert_instr(vminnm))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fminnm) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -23624,26 +29590,18 @@ pub fn vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnm.v4f32" - )] - fn _vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - unsafe { _vminnmq_f32(a, b) } +pub fn vmla_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { + vmla_u32(a, b, vdup_n_u32(c)) } -#[doc = "Floating-point multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_f32)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -23653,18 +29611,18 @@ pub fn vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmla_f32(a: float32x2_t, b: 
float32x2_t, c: float32x2_t) -> float32x2_t { - unsafe { simd_add(a, simd_mul(b, c)) } +pub fn vmlaq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { + vmlaq_u32(a, b, vdupq_n_u32(c)) } -#[doc = "Floating-point multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_f32)"] +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -23674,20 +29632,19 @@ pub fn vmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { +pub fn vmla_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { unsafe { simd_add(a, simd_mul(b, c)) } } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_f32)"] +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, 
LANE = 1) + assert_instr(mla) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23696,25 +29653,19 @@ pub fn vmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmla_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 1); - unsafe { vmla_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } +pub fn vmlaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { + unsafe { simd_add(a, simd_mul(b, c)) } } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_f32)"] +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 1) + assert_instr(mla) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23723,25 +29674,19 @@ pub fn vmla_lane_f32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmla_laneq_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x4_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 2); - unsafe { vmla_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } +pub fn vmla_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { + unsafe { simd_add(a, simd_mul(b, c)) } } 
-#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_f32)"] +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 1) + assert_instr(mla) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23750,31 +29695,19 @@ pub fn vmla_laneq_f32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlaq_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x2_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 1); - unsafe { - vmlaq_f32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) - } +pub fn vmlaq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { + unsafe { simd_add(a, simd_mul(b, c)) } } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_f32)"] +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", 
target_arch = "arm64ec")), - assert_instr(fmul, LANE = 1) + assert_instr(mla) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23783,31 +29716,19 @@ pub fn vmlaq_lane_f32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlaq_laneq_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe { - vmlaq_f32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) - } +pub fn vmla_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { + unsafe { simd_add(a, simd_mul(b, c)) } } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_s16)"] +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) + assert_instr(mla) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23816,27 +29737,19 @@ pub fn vmlaq_laneq_f32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmla_lane_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe { - vmla_s16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) - } +pub fn vmlaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> 
int32x4_t { + unsafe { simd_add(a, simd_mul(b, c)) } } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_u16)"] +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) + assert_instr(mla) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23845,27 +29758,19 @@ pub fn vmla_lane_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmla_lane_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe { - vmla_u16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) - } +pub fn vmla_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { + unsafe { simd_add(a, simd_mul(b, c)) } } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_s16)"] +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vmla.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) + assert_instr(mla) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23874,27 +29779,19 @@ pub fn vmla_lane_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmla_laneq_s16(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t { - static_assert_uimm_bits!(LANE, 3); - unsafe { - vmla_s16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) - } +pub fn vmlaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { + unsafe { simd_add(a, simd_mul(b, c)) } } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_u16)"] +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) + assert_instr(mla) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23903,27 +29800,19 @@ pub fn vmla_laneq_s16(a: int16x4_t, b: int16x4_t, c: int16x8_t) target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmla_laneq_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x8_t) -> uint16x4_t { - static_assert_uimm_bits!(LANE, 3); - unsafe { - vmla_u16( - a, - b, - 
simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) - } +pub fn vmla_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { + unsafe { simd_add(a, simd_mul(b, c)) } } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_s16)"] +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) + assert_instr(mla) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23932,40 +29821,19 @@ pub fn vmla_laneq_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x8 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlaq_lane_s16(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t { - static_assert_uimm_bits!(LANE, 2); - unsafe { - vmlaq_s16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) - } +pub fn vmlaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { + unsafe { simd_add(a, simd_mul(b, c)) } } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_u16)"] +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u32)"] #[inline] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) + assert_instr(mla) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23974,40 +29842,19 @@ pub fn vmlaq_lane_s16(a: int16x8_t, b: int16x8_t, c: int16x4_t) target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlaq_lane_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x4_t) -> uint16x8_t { - static_assert_uimm_bits!(LANE, 2); - unsafe { - vmlaq_u16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) - } +pub fn vmla_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { + unsafe { simd_add(a, simd_mul(b, c)) } } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_s16)"] +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) + assert_instr(mla) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24016,38 +29863,18 
@@ pub fn vmlaq_lane_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x4 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlaq_laneq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(LANE, 3); - unsafe { - vmlaq_s16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) - } +pub fn vmlaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { + unsafe { simd_add(a, simd_mul(b, c)) } } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_u16)"] +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) + assert_instr(smlal, LANE = 1) )] #[rustc_legacy_const_generics(3)] #[cfg_attr( @@ -24058,38 +29885,25 @@ pub fn vmlaq_laneq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlaq_laneq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(LANE, 3); +pub fn vmlal_lane_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); unsafe { - vmlaq_u16( + vmlal_s16( a, b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE 
as u32, - LANE as u32 - ] - ), + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_s32)"] +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) + assert_instr(smlal, LANE = 1) )] #[rustc_legacy_const_generics(3)] #[cfg_attr( @@ -24100,19 +29914,25 @@ pub fn vmlaq_laneq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmla_lane_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - unsafe { vmla_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } +pub fn vmlal_laneq_s16(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 3); + unsafe { + vmlal_s16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) + } } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_u32)"] +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) + assert_instr(smlal, LANE = 1) )] #[rustc_legacy_const_generics(3)] #[cfg_attr( @@ -24123,19 +29943,19 @@ pub fn vmla_lane_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmla_lane_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { +pub fn vmlal_lane_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { static_assert_uimm_bits!(LANE, 1); - unsafe { vmla_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } + unsafe { vmlal_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_s32)"] +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) + assert_instr(smlal, LANE = 1) )] #[rustc_legacy_const_generics(3)] #[cfg_attr( @@ -24146,19 +29966,19 @@ pub fn vmla_lane_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmla_laneq_s32(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t { +pub fn vmlal_laneq_s32(a: int64x2_t, b: 
int32x2_t, c: int32x4_t) -> int64x2_t { static_assert_uimm_bits!(LANE, 2); - unsafe { vmla_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } + unsafe { vmlal_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_u32)"] +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) + assert_instr(umlal, LANE = 1) )] #[rustc_legacy_const_generics(3)] #[cfg_attr( @@ -24169,19 +29989,25 @@ pub fn vmla_laneq_s32(a: int32x2_t, b: int32x2_t, c: int32x4_t) target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmla_laneq_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x4_t) -> uint32x2_t { +pub fn vmlal_lane_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { static_assert_uimm_bits!(LANE, 2); - unsafe { vmla_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } + unsafe { + vmlal_u16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) + } } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_s32)"] +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_u16)"] #[inline] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) + assert_instr(umlal, LANE = 1) )] #[rustc_legacy_const_generics(3)] #[cfg_attr( @@ -24192,25 +30018,25 @@ pub fn vmla_laneq_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x4 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlaq_lane_s32(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 1); +pub fn vmlal_laneq_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x8_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 3); unsafe { - vmlaq_s32( + vmlal_u16( a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_u32)"] +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) + assert_instr(umlal, LANE = 1) )] #[rustc_legacy_const_generics(3)] #[cfg_attr( @@ -24221,25 +30047,19 @@ pub fn vmlaq_lane_s32(a: int32x4_t, b: int32x4_t, c: int32x2_t) target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn 
vmlaq_lane_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x2_t) -> uint32x4_t { +pub fn vmlal_lane_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { static_assert_uimm_bits!(LANE, 1); - unsafe { - vmlaq_u32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) - } + unsafe { vmlal_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_s32)"] +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) + assert_instr(umlal, LANE = 1) )] #[rustc_legacy_const_generics(3)] #[cfg_attr( @@ -24250,27 +30070,20 @@ pub fn vmlaq_lane_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x2 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlaq_laneq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { +pub fn vmlal_laneq_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x4_t) -> uint64x2_t { static_assert_uimm_bits!(LANE, 2); - unsafe { - vmlaq_s32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) - } + unsafe { vmlal_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_u32)"] +#[doc = "Vector widening multiply 
accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) + assert_instr(smlal) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24279,25 +30092,18 @@ pub fn vmlaq_laneq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlaq_laneq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe { - vmlaq_u32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) - } +pub fn vmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { + vmlal_s16(a, b, vdup_n_s16(c)) } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_f32)"] +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) + assert_instr(smlal) )] #[cfg_attr( not(target_arch = "arm"), @@ -24307,18 +30113,18 @@ pub fn vmlaq_laneq_u32(a: 
uint32x4_t, b: uint32x4_t, c: uint32x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmla_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { - vmla_f32(a, b, vdup_n_f32(c)) +pub fn vmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { + vmlal_s32(a, b, vdup_n_s32(c)) } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_f32)"] +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) + assert_instr(umlal) )] #[cfg_attr( not(target_arch = "arm"), @@ -24328,18 +30134,18 @@ pub fn vmla_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { - vmlaq_f32(a, b, vdupq_n_f32(c)) +pub fn vmlal_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { + vmlal_u16(a, b, vdup_n_u16(c)) } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_s16)"] +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = 
"arm"), assert_instr("vmla.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(umlal) )] #[cfg_attr( not(target_arch = "arm"), @@ -24349,18 +30155,18 @@ pub fn vmlaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmla_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { - vmla_s16(a, b, vdup_n_s16(c)) +pub fn vmlal_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { + vmlal_u32(a, b, vdup_n_u32(c)) } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_s16)"] +#[doc = "Signed multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(smlal) )] #[cfg_attr( not(target_arch = "arm"), @@ -24370,18 +30176,18 @@ pub fn vmla_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlaq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { - vmlaq_s16(a, b, vdupq_n_s16(c)) +pub fn vmlal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { + unsafe { simd_add(a, vmull_s8(b, c)) } } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_u16)"] +#[doc = "Signed 
multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(smlal) )] #[cfg_attr( not(target_arch = "arm"), @@ -24391,18 +30197,18 @@ pub fn vmlaq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmla_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { - vmla_u16(a, b, vdup_n_u16(c)) +pub fn vmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { + unsafe { simd_add(a, vmull_s16(b, c)) } } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_u16)"] +#[doc = "Signed multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(smlal) )] #[cfg_attr( not(target_arch = "arm"), @@ -24412,18 +30218,18 @@ pub fn vmla_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlaq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { - vmlaq_u16(a, b, vdupq_n_u16(c)) +pub fn 
vmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { + unsafe { simd_add(a, vmull_s32(b, c)) } } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_s32)"] +#[doc = "Unsigned multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(umlal) )] #[cfg_attr( not(target_arch = "arm"), @@ -24433,18 +30239,18 @@ pub fn vmlaq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmla_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { - vmla_s32(a, b, vdup_n_s32(c)) +pub fn vmlal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { + unsafe { simd_add(a, vmull_u8(b, c)) } } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_s32)"] +#[doc = "Unsigned multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(umlal) )] #[cfg_attr( not(target_arch = "arm"), @@ -24454,18 
+30260,18 @@ pub fn vmla_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlaq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { - vmlaq_s32(a, b, vdupq_n_s32(c)) +pub fn vmlal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { + unsafe { simd_add(a, vmull_u16(b, c)) } } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_u32)"] +#[doc = "Unsigned multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(umlal) )] #[cfg_attr( not(target_arch = "arm"), @@ -24475,18 +30281,18 @@ pub fn vmlaq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmla_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { - vmla_u32(a, b, vdup_n_u32(c)) +pub fn vmlal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { + unsafe { simd_add(a, vmull_u32(b, c)) } } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_u32)"] +#[doc = "Floating-point multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(fmul) )] #[cfg_attr( not(target_arch = "arm"), @@ -24496,18 +30302,18 @@ pub fn vmla_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlaq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { - vmlaq_u32(a, b, vdupq_n_u32(c)) +pub fn vmls_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { + unsafe { simd_sub(a, simd_mul(b, c)) } } -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s8)"] +#[doc = "Floating-point multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(fmul) )] #[cfg_attr( not(target_arch = "arm"), @@ -24517,19 +30323,20 @@ pub fn vmlaq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmla_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { - unsafe { simd_add(a, simd_mul(b, c)) } +pub fn vmlsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { + unsafe { simd_sub(a, simd_mul(b, c)) } } -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s8)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(fmul, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24538,19 +30345,25 @@ pub fn vmla_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { - unsafe { simd_add(a, simd_mul(b, c)) } +pub fn vmls_lane_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE, 1); + unsafe { vmls_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } } -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s16)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(fmul, LANE = 
1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24559,19 +30372,25 @@ pub fn vmlaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmla_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { - unsafe { simd_add(a, simd_mul(b, c)) } +pub fn vmls_laneq_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x4_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { vmls_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } } -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s16)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(fmul, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24580,19 +30399,31 @@ pub fn vmla_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlaq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - unsafe { simd_add(a, simd_mul(b, c)) } +pub fn vmlsq_lane_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x2_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE, 1); + unsafe { + vmlsq_f32( + a, + b, + simd_shuffle!(c, c, [LANE as 
u32, LANE as u32, LANE as u32, LANE as u32]), + ) + } } -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s32)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(fmul, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24601,19 +30432,31 @@ pub fn vmlaq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmla_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { - unsafe { simd_add(a, simd_mul(b, c)) } +pub fn vmlsq_laneq_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { + vmlsq_f32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) + } } -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s32)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] +#[cfg_attr(all(test, target_arch = 
"arm"), assert_instr("vmls.i16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(mls, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24622,19 +30465,27 @@ pub fn vmla_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - unsafe { simd_add(a, simd_mul(b, c)) } +pub fn vmls_lane_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { + vmls_s16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) + } } -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u8)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(mls, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24643,19 +30494,27 @@ pub fn vmlaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmla_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { - unsafe { simd_add(a, simd_mul(b, c)) } +pub fn 
vmls_lane_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { + vmls_u16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) + } } -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u8)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(mls, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24664,19 +30523,98 @@ pub fn vmla_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { - unsafe { simd_add(a, simd_mul(b, c)) } +pub fn vmls_laneq_s16(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t { + static_assert_uimm_bits!(LANE, 3); + unsafe { + vmls_s16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) + } } -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u16)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_u16)"] +#[inline] +#[target_feature(enable = "neon")] 
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vmls_laneq_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x8_t) -> uint16x4_t { + static_assert_uimm_bits!(LANE, 3); + unsafe { + vmls_u16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) + } +} +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_s16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vmlsq_lane_s16(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { + vmlsq_s16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) + } +} +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_u16)"] #[inline] #[target_feature(enable = 
"neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(mls, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24685,19 +30623,40 @@ pub fn vmlaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmla_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { - unsafe { simd_add(a, simd_mul(b, c)) } +pub fn vmlsq_lane_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x4_t) -> uint16x8_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { + vmlsq_u16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) + } } -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u16)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(mls, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24706,19 +30665,40 @@ pub fn 
vmla_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { - unsafe { simd_add(a, simd_mul(b, c)) } +pub fn vmlsq_laneq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(LANE, 3); + unsafe { + vmlsq_s16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) + } } -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u32)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(mls, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24727,19 +30707,40 @@ pub fn vmlaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmla_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { - unsafe { simd_add(a, simd_mul(b, c)) } +pub fn vmlsq_laneq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(LANE, 3); + unsafe { + vmlsq_u16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as 
u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) + } } -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u32)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(mls, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24748,18 +30749,19 @@ pub fn vmla_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - unsafe { simd_add(a, simd_mul(b, c)) } +pub fn vmls_lane_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + unsafe { vmls_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } } -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_s16)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16", LANE = 1))] +#[cfg_attr(all(test, target_arch 
= "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal, LANE = 1) + assert_instr(mls, LANE = 1) )] #[rustc_legacy_const_generics(3)] #[cfg_attr( @@ -24770,25 +30772,19 @@ pub fn vmlaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlal_lane_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe { - vmlal_s16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) - } +pub fn vmls_lane_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 1); + unsafe { vmls_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } } -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_s16)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal, LANE = 1) + assert_instr(mls, LANE = 1) )] #[rustc_legacy_const_generics(3)] #[cfg_attr( @@ -24799,25 +30795,19 @@ pub fn vmlal_lane_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlal_laneq_s16(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 3); - unsafe { - vmlal_s16( - 
a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) - } +pub fn vmls_laneq_s32(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { vmls_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } } -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_s32)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal, LANE = 1) + assert_instr(mls, LANE = 1) )] #[rustc_legacy_const_generics(3)] #[cfg_attr( @@ -24828,19 +30818,19 @@ pub fn vmlal_laneq_s16(a: int32x4_t, b: int16x4_t, c: int16x8_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlal_lane_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { - static_assert_uimm_bits!(LANE, 1); - unsafe { vmlal_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } +pub fn vmls_laneq_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x4_t) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { vmls_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } } -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_s32)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal, LANE = 1) + assert_instr(mls, LANE = 1) )] #[rustc_legacy_const_generics(3)] #[cfg_attr( @@ -24851,19 +30841,25 @@ pub fn vmlal_lane_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlal_laneq_s32(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t { - static_assert_uimm_bits!(LANE, 2); - unsafe { vmlal_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } +pub fn vmlsq_lane_s32(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 1); + unsafe { + vmlsq_s32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) + } } -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_u16)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal, LANE = 1) + assert_instr(mls, LANE = 1) )] #[rustc_legacy_const_generics(3)] #[cfg_attr( @@ -24874,25 
+30870,25 @@ pub fn vmlal_laneq_s32(a: int64x2_t, b: int32x2_t, c: int32x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlal_lane_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); +pub fn vmlsq_lane_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x2_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 1); unsafe { - vmlal_u16( + vmlsq_u32( a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } } -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_u16)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal, LANE = 1) + assert_instr(mls, LANE = 1) )] #[rustc_legacy_const_generics(3)] #[cfg_attr( @@ -24903,25 +30899,25 @@ pub fn vmlal_lane_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlal_laneq_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x8_t) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 3); +pub fn vmlsq_laneq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); unsafe { - vmlal_u16( + vmlsq_s32( a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } } -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_u32)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal, LANE = 1) + assert_instr(mls, LANE = 1) )] #[rustc_legacy_const_generics(3)] #[cfg_attr( @@ -24932,21 +30928,26 @@ pub fn vmlal_laneq_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlal_lane_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 1); - unsafe { vmlal_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } +pub fn vmlsq_laneq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { + vmlsq_u32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) + } } -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_u32)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] #[cfg_attr( all(test, any(target_arch = 
"aarch64", target_arch = "arm64ec")), - assert_instr(umlal, LANE = 1) + assert_instr(fmul) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24955,19 +30956,18 @@ pub fn vmlal_lane_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlal_laneq_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x4_t) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 2); - unsafe { vmlal_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } +pub fn vmls_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { + vmls_f32(a, b, vdup_n_f32(c)) } -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_s16)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal) + assert_instr(fmul) )] #[cfg_attr( not(target_arch = "arm"), @@ -24977,18 +30977,18 @@ pub fn vmlal_laneq_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { - vmlal_s16(a, b, vdup_n_s16(c)) +pub fn vmlsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { + vmlsq_f32(a, b, vdupq_n_f32(c)) } -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_s32)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal) + assert_instr(mls) )] #[cfg_attr( not(target_arch = "arm"), @@ -24998,18 +30998,18 @@ pub fn vmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { - vmlal_s32(a, b, vdup_n_s32(c)) +pub fn vmls_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { + vmls_s16(a, b, vdup_n_s16(c)) } -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_u16)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal) + assert_instr(mls) )] #[cfg_attr( not(target_arch = "arm"), @@ -25019,18 +31019,18 @@ pub fn vmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = 
"111800") )] -pub fn vmlal_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { - vmlal_u16(a, b, vdup_n_u16(c)) +pub fn vmlsq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { + vmlsq_s16(a, b, vdupq_n_s16(c)) } -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_u32)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal) + assert_instr(mls) )] #[cfg_attr( not(target_arch = "arm"), @@ -25040,18 +31040,18 @@ pub fn vmlal_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlal_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { - vmlal_u32(a, b, vdup_n_u32(c)) +pub fn vmls_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { + vmls_u16(a, b, vdup_n_u16(c)) } -#[doc = "Signed multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s8)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", 
target_arch = "arm64ec")), - assert_instr(smlal) + assert_instr(mls) )] #[cfg_attr( not(target_arch = "arm"), @@ -25061,18 +31061,18 @@ pub fn vmlal_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { - unsafe { simd_add(a, vmull_s8(b, c)) } +pub fn vmlsq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { + vmlsq_u16(a, b, vdupq_n_u16(c)) } -#[doc = "Signed multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s16)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal) + assert_instr(mls) )] #[cfg_attr( not(target_arch = "arm"), @@ -25082,18 +31082,18 @@ pub fn vmlal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { - unsafe { simd_add(a, vmull_s16(b, c)) } +pub fn vmls_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { + vmls_s32(a, b, vdup_n_s32(c)) } -#[doc = "Signed multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s32)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_s32)"] #[inline] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal) + assert_instr(mls) )] #[cfg_attr( not(target_arch = "arm"), @@ -25103,18 +31103,18 @@ pub fn vmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { - unsafe { simd_add(a, vmull_s32(b, c)) } +pub fn vmlsq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { + vmlsq_s32(a, b, vdupq_n_s32(c)) } -#[doc = "Unsigned multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u8)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal) + assert_instr(mls) )] #[cfg_attr( not(target_arch = "arm"), @@ -25124,18 +31124,18 @@ pub fn vmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { - unsafe { simd_add(a, vmull_u8(b, c)) } +pub fn vmls_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { + vmls_u32(a, b, vdup_n_u32(c)) } -#[doc = "Unsigned multiply-add long"] 
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u16)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal) + assert_instr(mls) )] #[cfg_attr( not(target_arch = "arm"), @@ -25145,18 +31145,18 @@ pub fn vmlal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { - unsafe { simd_add(a, vmull_u16(b, c)) } +pub fn vmlsq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { + vmlsq_u32(a, b, vdupq_n_u32(c)) } -#[doc = "Unsigned multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u32)"] +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal) + assert_instr(mls) )] #[cfg_attr( not(target_arch = "arm"), @@ -25166,18 +31166,18 @@ pub fn vmlal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { - unsafe { simd_add(a, vmull_u32(b, c)) } +pub fn vmls_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { + unsafe { simd_sub(a, simd_mul(b, c)) } } -#[doc = "Floating-point multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_f32)"] +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) + assert_instr(mls) )] #[cfg_attr( not(target_arch = "arm"), @@ -25187,18 +31187,18 @@ pub fn vmlal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { +pub fn vmlsq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { unsafe { simd_sub(a, simd_mul(b, c)) } } -#[doc = "Floating-point multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_f32)"] +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vmls.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) + assert_instr(mls) )] #[cfg_attr( not(target_arch = "arm"), @@ -25208,20 +31208,19 @@ pub fn vmls_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { +pub fn vmls_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { unsafe { simd_sub(a, simd_mul(b, c)) } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_f32)"] +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 1) + assert_instr(mls) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -25230,25 +31229,19 @@ pub fn vmlsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 1); - unsafe { vmls_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } +pub fn vmlsq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { + unsafe { simd_sub(a, simd_mul(b, c)) } } -#[doc = "Vector multiply subtract with 
scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_f32)"] +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 1) + assert_instr(mls) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -25257,25 +31250,19 @@ pub fn vmls_lane_f32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_laneq_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x4_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 2); - unsafe { vmls_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } +pub fn vmls_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { + unsafe { simd_sub(a, simd_mul(b, c)) } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_f32)"] +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 1) + assert_instr(mls) )] 
-#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -25284,31 +31271,19 @@ pub fn vmls_laneq_f32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x2_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 1); - unsafe { - vmlsq_f32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) - } +pub fn vmlsq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { + unsafe { simd_sub(a, simd_mul(b, c)) } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_f32)"] +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 1) + assert_instr(mls) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -25317,31 +31292,19 @@ pub fn vmlsq_lane_f32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_laneq_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe { - vmlsq_f32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) - } +pub fn vmls_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { + unsafe { simd_sub(a, simd_mul(b, c)) } } 
-#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_s16)"] +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(mls) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -25350,27 +31313,19 @@ pub fn vmlsq_laneq_f32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_lane_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe { - vmls_s16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) - } +pub fn vmlsq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { + unsafe { simd_sub(a, simd_mul(b, c)) } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_u16)"] +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = 
"arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(mls) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -25379,27 +31334,19 @@ pub fn vmls_lane_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_lane_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe { - vmls_u16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) - } +pub fn vmls_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { + unsafe { simd_sub(a, simd_mul(b, c)) } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_s16)"] +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(mls) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -25408,27 +31355,19 @@ pub fn vmls_lane_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_laneq_s16(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t { - static_assert_uimm_bits!(LANE, 3); - unsafe { - vmls_s16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) - } 
+pub fn vmlsq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { + unsafe { simd_sub(a, simd_mul(b, c)) } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_u16)"] +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(mls) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -25437,27 +31376,19 @@ pub fn vmls_laneq_s16(a: int16x4_t, b: int16x4_t, c: int16x8_t) target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_laneq_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x8_t) -> uint16x4_t { - static_assert_uimm_bits!(LANE, 3); - unsafe { - vmls_u16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) - } +pub fn vmls_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { + unsafe { simd_sub(a, simd_mul(b, c)) } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_s16)"] +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(mls) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -25466,38 +31397,18 @@ pub fn vmls_laneq_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x8 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_lane_s16(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t { - static_assert_uimm_bits!(LANE, 2); - unsafe { - vmlsq_s16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) - } +pub fn vmlsq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { + unsafe { simd_sub(a, simd_mul(b, c)) } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_u16)"] +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(smlsl, LANE = 1) )] #[rustc_legacy_const_generics(3)] #[cfg_attr( @@ -25508,38 +31419,25 @@ pub fn vmlsq_lane_s16(a: int16x8_t, b: int16x8_t, c: int16x4_t) target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_lane_u16(a: 
uint16x8_t, b: uint16x8_t, c: uint16x4_t) -> uint16x8_t { +pub fn vmlsl_lane_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { static_assert_uimm_bits!(LANE, 2); unsafe { - vmlsq_u16( + vmlsl_s16( a, b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_s16)"] +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(smlsl, LANE = 1) )] #[rustc_legacy_const_generics(3)] #[cfg_attr( @@ -25550,38 +31448,25 @@ pub fn vmlsq_lane_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x4 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_laneq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { +pub fn vmlsl_laneq_s16(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t { static_assert_uimm_bits!(LANE, 3); unsafe { - vmlsq_s16( + vmlsl_s16( a, b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_u16)"] +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(smlsl, LANE = 1) )] #[rustc_legacy_const_generics(3)] #[cfg_attr( @@ -25592,38 +31477,19 @@ pub fn vmlsq_laneq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_laneq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(LANE, 3); - unsafe { - vmlsq_u16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) - } +pub fn vmlsl_lane_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { + static_assert_uimm_bits!(LANE, 1); + unsafe { vmlsl_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_s32)"] +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr(all(test, 
target_arch = "arm"), assert_instr("vmlsl.s32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(smlsl, LANE = 1) )] #[rustc_legacy_const_generics(3)] #[cfg_attr( @@ -25634,19 +31500,19 @@ pub fn vmlsq_laneq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_lane_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - unsafe { vmls_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } +pub fn vmlsl_laneq_s32(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { vmlsl_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_u32)"] +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(umlsl, LANE = 1) )] #[rustc_legacy_const_generics(3)] #[cfg_attr( @@ -25657,19 +31523,25 @@ pub fn vmls_lane_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_lane_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { - static_assert_uimm_bits!(LANE, 1); - unsafe { vmls_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as 
u32])) } +pub fn vmlsl_lane_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { + vmlsl_u16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) + } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_s32)"] +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(umlsl, LANE = 1) )] #[rustc_legacy_const_generics(3)] #[cfg_attr( @@ -25680,19 +31552,25 @@ pub fn vmls_lane_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_laneq_s32(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t { - static_assert_uimm_bits!(LANE, 2); - unsafe { vmls_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } +pub fn vmlsl_laneq_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x8_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 3); + unsafe { + vmlsl_u16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) + } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_u32)"] +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(umlsl, LANE = 1) )] #[rustc_legacy_const_generics(3)] #[cfg_attr( @@ -25703,19 +31581,19 @@ pub fn vmls_laneq_s32(a: int32x2_t, b: int32x2_t, c: int32x4_t) target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_laneq_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x4_t) -> uint32x2_t { - static_assert_uimm_bits!(LANE, 2); - unsafe { vmls_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } +pub fn vmlsl_lane_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 1); + unsafe { vmlsl_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_s32)"] +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(umlsl, LANE = 1) )] #[rustc_legacy_const_generics(3)] #[cfg_attr( @@ -25726,27 +31604,20 @@ pub fn vmls_laneq_u32(a: 
uint32x2_t, b: uint32x2_t, c: uint32x4 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_lane_s32(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 1); - unsafe { - vmlsq_s32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) - } +pub fn vmlsl_laneq_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x4_t) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { vmlsl_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_u32)"] +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(smlsl) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -25755,27 +31626,19 @@ pub fn vmlsq_lane_s32(a: int32x4_t, b: int32x4_t, c: int32x2_t) target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_lane_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x2_t) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 1); - unsafe { - vmlsq_u32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) - } +pub fn vmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { + vmlsl_s16(a, b, vdup_n_s16(c)) } -#[doc = "Vector multiply subtract with 
scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_s32)"] +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(smlsl) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -25784,27 +31647,19 @@ pub fn vmlsq_lane_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x2 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_laneq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe { - vmlsq_s32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) - } +pub fn vmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { + vmlsl_s32(a, b, vdup_n_s32(c)) } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_u32)"] +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16"))] #[cfg_attr( all(test, any(target_arch = 
"aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(umlsl) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -25813,25 +31668,18 @@ pub fn vmlsq_laneq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_laneq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe { - vmlsq_u32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) - } +pub fn vmlsl_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { + vmlsl_u16(a, b, vdup_n_u16(c)) } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_f32)"] +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) + assert_instr(umlsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -25841,18 +31689,18 @@ pub fn vmlsq_laneq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { - vmls_f32(a, b, vdup_n_f32(c)) +pub fn vmlsl_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { + vmlsl_u32(a, b, vdup_n_u32(c)) } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_f32)"] +#[doc = "Signed multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) + assert_instr(smlsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -25862,18 +31710,18 @@ pub fn vmls_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { - vmlsq_f32(a, b, vdupq_n_f32(c)) +pub fn vmlsl_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { + unsafe { simd_sub(a, vmull_s8(b, c)) } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_s16)"] +#[doc = "Signed multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(smlsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -25883,18 +31731,18 @@ pub fn vmlsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = 
"111800") )] -pub fn vmls_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { - vmls_s16(a, b, vdup_n_s16(c)) +pub fn vmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { + unsafe { simd_sub(a, vmull_s16(b, c)) } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_s16)"] +#[doc = "Signed multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(smlsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -25904,18 +31752,18 @@ pub fn vmls_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { - vmlsq_s16(a, b, vdupq_n_s16(c)) +pub fn vmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { + unsafe { simd_sub(a, vmull_s32(b, c)) } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_u16)"] +#[doc = "Unsigned multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", 
target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(umlsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -25925,18 +31773,18 @@ pub fn vmlsq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { - vmls_u16(a, b, vdup_n_u16(c)) +pub fn vmlsl_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { + unsafe { simd_sub(a, vmull_u8(b, c)) } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_u16)"] +#[doc = "Unsigned multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(umlsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -25946,18 +31794,18 @@ pub fn vmls_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { - vmlsq_u16(a, b, vdupq_n_u16(c)) +pub fn vmlsl_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { + unsafe { simd_sub(a, vmull_u16(b, c)) } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_s32)"] +#[doc = "Unsigned multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u32)"] 
#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(umlsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -25967,60 +31815,104 @@ pub fn vmlsq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { - vmls_s32(a, b, vdup_n_s32(c)) +pub fn vmlsl_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { + unsafe { simd_sub(a, vmull_u32(b, c)) } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_s32)"] +#[doc = "8-bit integer matrix multiply-accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmmlaq_s32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(smmla) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_i8mm", issue = "117223") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { - vmlsq_s32(a, b, 
vdupq_n_s32(c)) +pub fn vmmlaq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smmla.v4i32.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.smmla.v4i32.v16i8")] + fn _vmmlaq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; + } + unsafe { _vmmlaq_s32(a, b, c) } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_u32)"] +#[doc = "8-bit integer matrix multiply-accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmmlaq_u32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(ummla) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_i8mm", issue = "117223") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { - vmls_u32(a, b, vdup_n_u32(c)) +pub fn vmmlaq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ummla.v4i32.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.ummla.v4i32.v16i8")] + fn _vmmlaq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) 
-> uint32x4_t; + } + unsafe { _vmmlaq_u32(a, b, c) } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_u32)"] +#[doc = "Duplicate element to vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) +)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vmov_n_f16(a: f16) -> float16x4_t { + vdup_n_f16(a) +} +#[doc = "Duplicate element to vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) +)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vmovq_n_f16(a: f16) -> float16x8_t { + vdupq_n_f16(a) +} +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -26030,18 +31922,18 @@ pub fn vmls_n_u32(a: 
uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { - vmlsq_u32(a, b, vdupq_n_u32(c)) +pub fn vmov_n_f32(value: f32) -> float32x2_t { + vdup_n_f32(value) } -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s8)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_p16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -26051,18 +31943,18 @@ pub fn vmlsq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { - unsafe { simd_sub(a, simd_mul(b, c)) } +pub fn vmov_n_p16(value: p16) -> poly16x4_t { + vdup_n_p16(value) } -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s8)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_p8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vdup.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -26072,18 +31964,18 @@ pub fn vmls_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { - unsafe { simd_sub(a, simd_mul(b, c)) } +pub fn vmov_n_p8(value: p8) -> poly8x8_t { + vdup_n_p8(value) } -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s16)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -26093,18 +31985,18 @@ pub fn vmlsq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { - unsafe { simd_sub(a, simd_mul(b, c)) } +pub fn vmov_n_s16(value: i16) -> int16x4_t { + vdup_n_s16(value) } -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s16)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_s32)"] 
#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -26114,18 +32006,18 @@ pub fn vmls_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - unsafe { simd_sub(a, simd_mul(b, c)) } +pub fn vmov_n_s32(value: i32) -> int32x2_t { + vdup_n_s32(value) } -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s32)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_s64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(fmov) )] #[cfg_attr( not(target_arch = "arm"), @@ -26135,18 +32027,18 @@ pub fn vmlsq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { - unsafe { simd_sub(a, simd_mul(b, c)) } +pub fn vmov_n_s64(value: i64) -> int64x1_t { + vdup_n_s64(value) } -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s32)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -26156,18 +32048,18 @@ pub fn vmls_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - unsafe { simd_sub(a, simd_mul(b, c)) } +pub fn vmov_n_s8(value: i8) -> int8x8_t { + vdup_n_s8(value) } -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u8)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -26177,18 +32069,18 @@ pub fn vmlsq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_u8(a: 
uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { - unsafe { simd_sub(a, simd_mul(b, c)) } +pub fn vmov_n_u16(value: u16) -> uint16x4_t { + vdup_n_u16(value) } -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u8)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -26198,18 +32090,18 @@ pub fn vmls_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { - unsafe { simd_sub(a, simd_mul(b, c)) } +pub fn vmov_n_u32(value: u32) -> uint32x2_t { + vdup_n_u32(value) } -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u16)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(fmov) )] #[cfg_attr( 
not(target_arch = "arm"), @@ -26219,18 +32111,18 @@ pub fn vmlsq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { - unsafe { simd_sub(a, simd_mul(b, c)) } +pub fn vmov_n_u64(value: u64) -> uint64x1_t { + vdup_n_u64(value) } -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u16)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -26240,18 +32132,18 @@ pub fn vmls_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { - unsafe { simd_sub(a, simd_mul(b, c)) } +pub fn vmov_n_u8(value: u8) -> uint8x8_t { + vdup_n_u8(value) } -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u32)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, 
target_arch = "arm"), assert_instr("vmls.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -26261,18 +32153,18 @@ pub fn vmlsq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { - unsafe { simd_sub(a, simd_mul(b, c)) } +pub fn vmovq_n_f32(value: f32) -> float32x4_t { + vdupq_n_f32(value) } -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u32)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_p16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -26282,20 +32174,19 @@ pub fn vmls_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - unsafe { simd_sub(a, simd_mul(b, c)) } +pub fn vmovq_n_p16(value: p16) -> poly16x8_t { + vdupq_n_p16(value) } -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_s16)"] +#[doc = "Duplicate vector 
element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_p8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl, LANE = 1) + assert_instr(dup) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -26304,27 +32195,19 @@ pub fn vmlsq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsl_lane_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe { - vmlsl_s16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) - } +pub fn vmovq_n_p8(value: p8) -> poly8x16_t { + vdupq_n_p8(value) } -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_s16)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl, LANE = 1) + assert_instr(dup) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = 
"neon_intrinsics", since = "1.59.0") @@ -26333,27 +32216,19 @@ pub fn vmlsl_lane_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsl_laneq_s16(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 3); - unsafe { - vmlsl_s16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) - } +pub fn vmovq_n_s16(value: i16) -> int16x8_t { + vdupq_n_s16(value) } -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_s32)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl, LANE = 1) + assert_instr(dup) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -26362,21 +32237,19 @@ pub fn vmlsl_laneq_s16(a: int32x4_t, b: int16x4_t, c: int16x8_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsl_lane_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { - static_assert_uimm_bits!(LANE, 1); - unsafe { vmlsl_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } +pub fn vmovq_n_s32(value: i32) -> int32x4_t { + vdupq_n_s32(value) } -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_s32)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_s64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl, LANE = 1) + assert_instr(dup) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -26385,21 +32258,19 @@ pub fn vmlsl_lane_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsl_laneq_s32(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t { - static_assert_uimm_bits!(LANE, 2); - unsafe { vmlsl_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } +pub fn vmovq_n_s64(value: i64) -> int64x2_t { + vdupq_n_s64(value) } -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_u16)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl, LANE = 1) + assert_instr(dup) )] 
-#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -26408,27 +32279,19 @@ pub fn vmlsl_laneq_s32(a: int64x2_t, b: int32x2_t, c: int32x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsl_lane_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe { - vmlsl_u16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) - } +pub fn vmovq_n_s8(value: i8) -> int8x16_t { + vdupq_n_s8(value) } -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_u16)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl, LANE = 1) + assert_instr(dup) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -26437,27 +32300,19 @@ pub fn vmlsl_lane_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsl_laneq_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x8_t) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 3); - unsafe { - vmlsl_u16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) - } +pub fn vmovq_n_u16(value: u16) -> uint16x8_t { + vdupq_n_u16(value) } -#[doc = "Vector 
widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_u32)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl, LANE = 1) + assert_instr(dup) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -26466,21 +32321,19 @@ pub fn vmlsl_laneq_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsl_lane_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 1); - unsafe { vmlsl_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } +pub fn vmovq_n_u32(value: u32) -> uint32x4_t { + vdupq_n_u32(value) } -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_u32)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - 
assert_instr(umlsl, LANE = 1) + assert_instr(dup) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -26489,19 +32342,18 @@ pub fn vmlsl_lane_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsl_laneq_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x4_t) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 2); - unsafe { vmlsl_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } +pub fn vmovq_n_u64(value: u64) -> uint64x2_t { + vdupq_n_u64(value) } -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_s16)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -26511,18 +32363,18 @@ pub fn vmlsl_laneq_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { - vmlsl_s16(a, b, vdup_n_s16(c)) +pub fn vmovq_n_u8(value: u8) -> uint8x16_t { + vdupq_n_u8(value) } -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_s32)"] +#[doc = "Vector long move."] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl) + assert_instr(sxtl) )] #[cfg_attr( not(target_arch = "arm"), @@ -26532,18 +32384,18 @@ pub fn vmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { - vmlsl_s32(a, b, vdup_n_s32(c)) +pub fn vmovl_s16(a: int16x4_t) -> int32x4_t { + unsafe { simd_cast(a) } } -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_u16)"] +#[doc = "Vector long move."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl) + assert_instr(sxtl) )] #[cfg_attr( not(target_arch = "arm"), @@ -26553,18 +32405,18 @@ pub fn vmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsl_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { - vmlsl_u16(a, b, vdup_n_u16(c)) +pub fn vmovl_s32(a: int32x2_t) -> int64x2_t { + unsafe { simd_cast(a) } } -#[doc = "Vector widening 
multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_u32)"] +#[doc = "Vector long move."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl) + assert_instr(sxtl) )] #[cfg_attr( not(target_arch = "arm"), @@ -26574,18 +32426,18 @@ pub fn vmlsl_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsl_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { - vmlsl_u32(a, b, vdup_n_u32(c)) +pub fn vmovl_s8(a: int8x8_t) -> int16x8_t { + unsafe { simd_cast(a) } } -#[doc = "Signed multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s8)"] +#[doc = "Vector long move."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl) + assert_instr(uxtl) )] #[cfg_attr( not(target_arch = "arm"), @@ -26595,18 +32447,18 @@ pub fn vmlsl_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsl_s8(a: int16x8_t, b: 
int8x8_t, c: int8x8_t) -> int16x8_t { - unsafe { simd_sub(a, vmull_s8(b, c)) } +pub fn vmovl_u16(a: uint16x4_t) -> uint32x4_t { + unsafe { simd_cast(a) } } -#[doc = "Signed multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s16)"] +#[doc = "Vector long move."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl) + assert_instr(uxtl) )] #[cfg_attr( not(target_arch = "arm"), @@ -26616,18 +32468,18 @@ pub fn vmlsl_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { - unsafe { simd_sub(a, vmull_s16(b, c)) } +pub fn vmovl_u32(a: uint32x2_t) -> uint64x2_t { + unsafe { simd_cast(a) } } -#[doc = "Signed multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s32)"] +#[doc = "Vector long move."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl) + assert_instr(uxtl) )] #[cfg_attr( not(target_arch = "arm"), @@ -26637,18 +32489,18 @@ pub fn vmlsl_s16(a: 
int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { - unsafe { simd_sub(a, vmull_s32(b, c)) } +pub fn vmovl_u8(a: uint8x8_t) -> uint16x8_t { + unsafe { simd_cast(a) } } -#[doc = "Unsigned multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u8)"] +#[doc = "Vector narrow integer."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl) + assert_instr(xtn) )] #[cfg_attr( not(target_arch = "arm"), @@ -26658,18 +32510,18 @@ pub fn vmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsl_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { - unsafe { simd_sub(a, vmull_u8(b, c)) } +pub fn vmovn_s16(a: int16x8_t) -> int8x8_t { + unsafe { simd_cast(a) } } -#[doc = "Unsigned multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u16)"] +#[doc = "Vector narrow integer."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] #[cfg_attr( 
all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl) + assert_instr(xtn) )] #[cfg_attr( not(target_arch = "arm"), @@ -26679,18 +32531,18 @@ pub fn vmlsl_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsl_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { - unsafe { simd_sub(a, vmull_u16(b, c)) } +pub fn vmovn_s32(a: int32x4_t) -> int16x4_t { + unsafe { simd_cast(a) } } -#[doc = "Unsigned multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u32)"] +#[doc = "Vector narrow integer."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_s64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl) + assert_instr(xtn) )] #[cfg_attr( not(target_arch = "arm"), @@ -26700,94 +32552,71 @@ pub fn vmlsl_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsl_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { - unsafe { simd_sub(a, vmull_u32(b, c)) } +pub fn vmovn_s64(a: int64x2_t) -> int32x2_t { + unsafe { simd_cast(a) } } -#[doc = "8-bit integer matrix multiply-accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmmlaq_s32)"] +#[doc = "Vector narrow integer."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_u16)"] #[inline] -#[target_feature(enable = 
"neon,i8mm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smmla) + assert_instr(xtn) )] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_i8mm", issue = "117223") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmmlaq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smmla.v4i32.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.smmla.v4i32.v16i8")] - fn _vmmlaq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; - } - unsafe { _vmmlaq_s32(a, b, c) } +pub fn vmovn_u16(a: uint16x8_t) -> uint8x8_t { + unsafe { simd_cast(a) } } -#[doc = "8-bit integer matrix multiply-accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmmlaq_u32)"] +#[doc = "Vector narrow integer."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_u32)"] #[inline] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ummla) + assert_instr(xtn) )] #[cfg_attr( 
not(target_arch = "arm"), - unstable(feature = "stdarch_neon_i8mm", issue = "117223") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmmlaq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ummla.v4i32.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.ummla.v4i32.v16i8")] - fn _vmmlaq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t; - } - unsafe { _vmmlaq_u32(a, b, c) } +pub fn vmovn_u32(a: uint32x4_t) -> uint16x4_t { + unsafe { simd_cast(a) } } -#[doc = "Duplicate element to vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_f16)"] +#[doc = "Vector narrow integer."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_u64)"] #[inline] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(xtn) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vmov_n_f16(a: f16) -> float16x4_t { - vdup_n_f16(a) -} -#[doc = "Duplicate element to vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + 
not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vmovq_n_f16(a: f16) -> float16x8_t { - vdupq_n_f16(a) +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vmovn_u64(a: uint64x2_t) -> uint32x2_t { + unsafe { simd_cast(a) } } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_f16)"] @@ -28535,6 +34364,314 @@ pub fn vmull_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { } unsafe { _vmull_u32(a, b) } } +#[doc = "Vector bitwise not."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvn_p8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mvn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vmvn_p8(a: poly8x8_t) -> poly8x8_t { + let b = poly8x8_t::splat(255); + unsafe { simd_xor(a, b) } +} +#[doc = "Vector bitwise not."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvn_s16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mvn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + 
unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vmvn_s16(a: int16x4_t) -> int16x4_t { + let b = int16x4_t::splat(-1); + unsafe { simd_xor(a, b) } +} +#[doc = "Vector bitwise not."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvn_s32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mvn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vmvn_s32(a: int32x2_t) -> int32x2_t { + let b = int32x2_t::splat(-1); + unsafe { simd_xor(a, b) } +} +#[doc = "Vector bitwise not."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvn_s8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mvn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vmvn_s8(a: int8x8_t) -> int8x8_t { + let b = int8x8_t::splat(-1); + unsafe { simd_xor(a, b) } +} +#[doc = "Vector bitwise not."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvn_u16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] 
+#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mvn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vmvn_u16(a: uint16x4_t) -> uint16x4_t { + let b = uint16x4_t::splat(65_535); + unsafe { simd_xor(a, b) } +} +#[doc = "Vector bitwise not."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvn_u32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mvn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vmvn_u32(a: uint32x2_t) -> uint32x2_t { + let b = uint32x2_t::splat(4_294_967_295); + unsafe { simd_xor(a, b) } +} +#[doc = "Vector bitwise not."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvn_u8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mvn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vmvn_u8(a: uint8x8_t) -> uint8x8_t { + let b = uint8x8_t::splat(255); + unsafe { simd_xor(a, b) } +} +#[doc = "Vector bitwise not."] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvnq_p8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mvn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vmvnq_p8(a: poly8x16_t) -> poly8x16_t { + let b = poly8x16_t::splat(255); + unsafe { simd_xor(a, b) } +} +#[doc = "Vector bitwise not."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvnq_s16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mvn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vmvnq_s16(a: int16x8_t) -> int16x8_t { + let b = int16x8_t::splat(-1); + unsafe { simd_xor(a, b) } +} +#[doc = "Vector bitwise not."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvnq_s32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mvn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + 
target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vmvnq_s32(a: int32x4_t) -> int32x4_t { + let b = int32x4_t::splat(-1); + unsafe { simd_xor(a, b) } +} +#[doc = "Vector bitwise not."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvnq_s8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mvn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vmvnq_s8(a: int8x16_t) -> int8x16_t { + let b = int8x16_t::splat(-1); + unsafe { simd_xor(a, b) } +} +#[doc = "Vector bitwise not."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvnq_u16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mvn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vmvnq_u16(a: uint16x8_t) -> uint16x8_t { + let b = uint16x8_t::splat(65_535); + unsafe { simd_xor(a, b) } +} +#[doc = "Vector bitwise not."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvnq_u32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = 
"arm"), assert_instr(vmvn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mvn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vmvnq_u32(a: uint32x4_t) -> uint32x4_t { + let b = uint32x4_t::splat(4_294_967_295); + unsafe { simd_xor(a, b) } +} +#[doc = "Vector bitwise not."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvnq_u8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mvn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vmvnq_u8(a: uint8x16_t) -> uint8x16_t { + let b = uint8x16_t::splat(255); + unsafe { simd_xor(a, b) } +} #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_f16)"] #[inline] @@ -28731,6 +34868,358 @@ pub fn vneg_s32(a: int32x2_t) -> int32x2_t { pub fn vnegq_s32(a: int32x4_t) -> int32x4_t { unsafe { simd_neg(a) } } +#[doc = "Vector bitwise inclusive OR NOT"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorn_s16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = 
"neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vorn_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + let c = int16x4_t::splat(-1); + unsafe { simd_or(simd_xor(b, c), a) } +} +#[doc = "Vector bitwise inclusive OR NOT"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorn_s32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vorn_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + let c = int32x2_t::splat(-1); + unsafe { simd_or(simd_xor(b, c), a) } +} +#[doc = "Vector bitwise inclusive OR NOT"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorn_s64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vorn_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + let c = int64x1_t::splat(-1); + unsafe { simd_or(simd_xor(b, c), a) } +} +#[doc = "Vector bitwise inclusive OR NOT"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorn_s8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vorn_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + let c = int8x8_t::splat(-1); + unsafe { simd_or(simd_xor(b, c), a) } +} +#[doc = "Vector bitwise inclusive OR NOT"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vornq_s16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vornq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + let c = int16x8_t::splat(-1); + unsafe { simd_or(simd_xor(b, c), a) } +} +#[doc = "Vector bitwise inclusive OR NOT"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vornq_s32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature 
= "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vornq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + let c = int32x4_t::splat(-1); + unsafe { simd_or(simd_xor(b, c), a) } +} +#[doc = "Vector bitwise inclusive OR NOT"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vornq_s64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vornq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + let c = int64x2_t::splat(-1); + unsafe { simd_or(simd_xor(b, c), a) } +} +#[doc = "Vector bitwise inclusive OR NOT"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vornq_s8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vornq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + let c = int8x16_t::splat(-1); + unsafe { simd_or(simd_xor(b, c), a) } +} +#[doc = "Vector bitwise inclusive OR NOT"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorn_u16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vorn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + let c = int16x4_t::splat(-1); + unsafe { simd_or(simd_xor(b, transmute(c)), a) } +} +#[doc = "Vector bitwise inclusive OR NOT"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorn_u32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vorn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + let c = int32x2_t::splat(-1); + unsafe { simd_or(simd_xor(b, transmute(c)), a) } +} +#[doc = "Vector bitwise inclusive OR NOT"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorn_u64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orn) +)] +#[cfg_attr( + 
not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vorn_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + let c = int64x1_t::splat(-1); + unsafe { simd_or(simd_xor(b, transmute(c)), a) } +} +#[doc = "Vector bitwise inclusive OR NOT"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorn_u8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vorn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + let c = int8x8_t::splat(-1); + unsafe { simd_or(simd_xor(b, transmute(c)), a) } +} +#[doc = "Vector bitwise inclusive OR NOT"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vornq_u16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vornq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + let c = int16x8_t::splat(-1); + unsafe { simd_or(simd_xor(b, transmute(c)), a) } +} +#[doc = "Vector bitwise inclusive OR NOT"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vornq_u32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vornq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + let c = int32x4_t::splat(-1); + unsafe { simd_or(simd_xor(b, transmute(c)), a) } +} +#[doc = "Vector bitwise inclusive OR NOT"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vornq_u64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vornq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + let c = int64x2_t::splat(-1); + unsafe { simd_or(simd_xor(b, transmute(c)), a) } +} +#[doc = "Vector bitwise inclusive OR NOT"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vornq_u8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orn) +)] +#[cfg_attr( + 
not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vornq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + let c = int8x16_t::splat(-1); + unsafe { simd_or(simd_xor(b, transmute(c)), a) } +} #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s8)"] #[inline] @@ -34676,43 +41165,283 @@ pub fn vqshlu_n_s64(a: int64x1_t) -> uint64x1_t { )] fn _vqshlu_n_s64(a: int64x1_t, n: int64x1_t) -> uint64x1_t; } - unsafe { _vqshlu_n_s64(a, const { int64x1_t([N as i64]) }) } + unsafe { _vqshlu_n_s64(a, const { int64x1_t([N as i64]) }) } +} +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { + static_assert_uimm_bits!(N, 6); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshlu.v2i64" + )] + fn _vqshluq_n_s64(a: int64x2_t, n: int64x2_t) -> uint64x2_t; + } + unsafe { _vqshluq_n_s64(a, const { int64x2_t([N as i64, N as i64]) }) } +} +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s16)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vqshrn_n_s16(a: int16x8_t) -> 
int8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v8i8")] + fn _vqshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; + } + unsafe { + _vqshrn_n_s16( + a, + const { + int16x8_t([ + -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, + -N as i16, + ]) + }, + ) + } +} +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s32)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v4i16")] + fn _vqshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; + } + unsafe { + _vqshrn_n_s32( + a, + const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, + ) + } +} +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s64)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v2i32")] + fn _vqshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; + } + unsafe { _vqshrn_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }) } +} +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshrn.v8i8" + )] + fn _vqshrn_n_s16(a: int16x8_t, n: i32) -> int8x8_t; + } + unsafe { _vqshrn_n_s16(a, N) } +} +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshrn.v4i16" + )] + fn _vqshrn_n_s32(a: int32x4_t, n: i32) -> int16x4_t; + } + unsafe { _vqshrn_n_s32(a, N) } +} +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshrn.v2i32" 
+ )] + fn _vqshrn_n_s64(a: int64x2_t, n: i32) -> int32x2_t; + } + unsafe { _vqshrn_n_s64(a, N) } +} +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u16)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v8i8")] + fn _vqshrn_n_u16(a: uint16x8_t, n: uint16x8_t) -> uint8x8_t; + } + unsafe { + _vqshrn_n_u16( + a, + const { + uint16x8_t([ + -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, + -N as u16, + ]) + }, + ) + } +} +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u32)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v4i16")] + fn _vqshrn_n_u32(a: uint32x4_t, n: uint32x4_t) -> uint16x4_t; + } + unsafe { + _vqshrn_n_u32( + a, + const { uint32x4_t([-N as u32, -N as u32, -N as u32, -N as u32]) }, + ) + } +} +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u64)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, 
assert_instr(vqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v2i32")] + fn _vqshrn_n_u64(a: uint64x2_t, n: uint64x2_t) -> uint32x2_t; + } + unsafe { _vqshrn_n_u64(a, const { uint64x2_t([-N as u64, -N as u64]) }) } +} +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(uqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshrn.v8i8" + )] + fn _vqshrn_n_u16(a: uint16x8_t, n: i32) -> uint8x8_t; + } + unsafe { _vqshrn_n_u16(a, N) } +} +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(uqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshrn.v4i16" + )] + fn _vqshrn_n_u32(a: uint32x4_t, n: i32) -> uint16x4_t; + } + unsafe { _vqshrn_n_u32(a, N) } } -#[doc = "Signed saturating shift left unsigned"] -#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s64)"] +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshlu, N = 2))] +#[cfg_attr(test, assert_instr(uqshrn, N = 2))] #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { - static_assert_uimm_bits!(N, 6); +pub fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshlu.v2i64" + link_name = "llvm.aarch64.neon.uqshrn.v2i32" )] - fn _vqshluq_n_s64(a: int64x2_t, n: int64x2_t) -> uint64x2_t; + fn _vqshrn_n_u64(a: uint64x2_t, n: i32) -> uint32x2_t; } - unsafe { _vqshluq_n_s64(a, const { int64x2_t([N as i64, N as i64]) }) } + unsafe { _vqshrn_n_u64(a, N) } } -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s16)"] +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s16)"] #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrn, N = 2))] +#[cfg_attr(test, assert_instr(vqshrun, N = 2))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { +pub fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v8i8")] 
- fn _vqshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v8i8")] + fn _vqshrun_n_s16(a: int16x8_t, n: int16x8_t) -> uint8x8_t; } unsafe { - _vqshrn_n_s16( + _vqshrun_n_s16( a, const { int16x8_t([ @@ -34723,349 +41452,769 @@ pub fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { ) } } -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s32)"] +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s32)"] #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrn, N = 2))] +#[cfg_attr(test, assert_instr(vqshrun, N = 2))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { +pub fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v4i16")] - fn _vqshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v4i16")] + fn _vqshrun_n_s32(a: int32x4_t, n: int32x4_t) -> uint16x4_t; } unsafe { - _vqshrn_n_s32( + _vqshrun_n_s32( a, const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, ) } } -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s64)"] +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s64)"] #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrn, 
N = 2))] +#[cfg_attr(test, assert_instr(vqshrun, N = 2))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { +pub fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v2i32")] - fn _vqshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v2i32")] + fn _vqshrun_n_s64(a: int64x2_t, n: int64x2_t) -> uint32x2_t; } - unsafe { _vqshrn_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }) } + unsafe { _vqshrun_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }) } } -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s16)"] +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshrn, N = 2))] +#[cfg_attr(test, assert_instr(sqshrun, N = 2))] #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { +pub fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshrn.v8i8" + link_name = "llvm.aarch64.neon.sqshrun.v8i8" )] - fn _vqshrn_n_s16(a: int16x8_t, n: i32) -> int8x8_t; + fn _vqshrun_n_s16(a: int16x8_t, n: i32) -> uint8x8_t; } - unsafe { _vqshrn_n_s16(a, N) } + unsafe { _vqshrun_n_s16(a, N) } } -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s32)"] +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshrn, N = 2))] +#[cfg_attr(test, assert_instr(sqshrun, N = 2))] #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { +pub fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshrn.v4i16" + link_name = "llvm.aarch64.neon.sqshrun.v4i16" )] - fn _vqshrn_n_s32(a: int32x4_t, n: i32) -> int16x4_t; + fn _vqshrun_n_s32(a: int32x4_t, n: i32) -> uint16x4_t; } - unsafe { _vqshrn_n_s32(a, N) } + unsafe { _vqshrun_n_s32(a, N) } } -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s64)"] +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s64)"] #[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshrn, N = 2))] +#[cfg_attr(test, assert_instr(sqshrun, N = 2))] #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { +pub fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshrn.v2i32" + link_name = "llvm.aarch64.neon.sqshrun.v2i32" )] - fn 
_vqshrn_n_s64(a: int64x2_t, n: i32) -> int32x2_t; + fn _vqshrun_n_s64(a: int64x2_t, n: i32) -> uint32x2_t; } - unsafe { _vqshrn_n_s64(a, N) } + unsafe { _vqshrun_n_s64(a, N) } } -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u16)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s8)"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v8i8")] - fn _vqshrn_n_u16(a: uint16x8_t, n: uint16x8_t) -> uint8x8_t; + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqsub.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v8i8")] + fn _vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; } - unsafe { - _vqshrn_n_u16( - a, - const { - uint16x8_t([ - -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, - -N as u16, - ]) - }, - ) + unsafe { _vqsub_s8(a, b) } +} +#[doc = "Saturating 
subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqsub.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v16i8")] + fn _vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; } + unsafe { _vqsubq_s8(a, b) } } -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u32)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s16)"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since 
= "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v4i16")] - fn _vqshrn_n_u32(a: uint32x4_t, n: uint32x4_t) -> uint16x4_t; + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqsub.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v4i16")] + fn _vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; } - unsafe { - _vqshrn_n_u32( - a, - const { uint32x4_t([-N as u32, -N as u32, -N as u32, -N as u32]) }, - ) + unsafe { _vqsub_s16(a, b) } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqsub.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v8i16")] + fn _vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; } + unsafe { _vqsubq_s16(a, b) } } -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u64)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s32)"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v2i32")] - fn _vqshrn_n_u64(a: uint64x2_t, n: uint64x2_t) -> uint32x2_t; + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqsub.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v2i32")] + fn _vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + unsafe { _vqsub_s32(a, b) } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = 
"arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqsub.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v4i32")] + fn _vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + unsafe { _vqsubq_s32(a, b) } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s64"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vqsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqsub.v1i64" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v1i64")] + fn _vqsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + } + unsafe { _vqsub_s64(a, b) } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s64"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = 
"neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqsub.v2i64" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v2i64")] + fn _vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + unsafe { _vqsubq_s64(a, b) } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vqsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqsub.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v8i8")] + fn _vqsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; + } + unsafe { _vqsub_u8(a, b) } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqsub) +)] +#[cfg_attr( 
+ not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vqsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqsub.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v16i8")] + fn _vqsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; + } + unsafe { _vqsubq_u8(a, b) } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vqsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqsub.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v4i16")] + fn _vqsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; + } + unsafe { _vqsub_u16(a, b) } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", 
target_arch = "arm64ec")), + assert_instr(uqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vqsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqsub.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v8i16")] + fn _vqsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; + } + unsafe { _vqsubq_u16(a, b) } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vqsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqsub.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v2i32")] + fn _vqsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; } - unsafe { _vqshrn_n_u64(a, const { uint64x2_t([-N as u64, -N as u64]) }) } + unsafe { _vqsub_u32(a, b) } } -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u16)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u32)"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(uqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vqsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshrn.v8i8" + link_name = "llvm.aarch64.neon.uqsub.v4i32" )] - fn _vqshrn_n_u16(a: uint16x8_t, n: i32) -> uint8x8_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v4i32")] + fn _vqsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; } - unsafe { _vqshrn_n_u16(a, N) } + unsafe { _vqsubq_u32(a, b) } } -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u32)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u64)"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(uqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); 
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u64"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vqsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshrn.v4i16" + link_name = "llvm.aarch64.neon.uqsub.v1i64" )] - fn _vqshrn_n_u32(a: uint32x4_t, n: i32) -> uint16x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v1i64")] + fn _vqsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t; } - unsafe { _vqshrn_n_u32(a, N) } + unsafe { _vqsub_u64(a, b) } } -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u64)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u64)"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(uqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u64"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vqsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshrn.v2i32" + link_name = "llvm.aarch64.neon.uqsub.v2i64" )] - fn _vqshrn_n_u64(a: uint64x2_t, n: i32) -> uint32x2_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v2i64")] + fn _vqsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t; } - unsafe { _vqshrn_n_u64(a, N) } + unsafe { _vqsubq_u64(a, b) } } -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s16)"] +#[doc = "Rounding Add returning High Narrow (high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_s16)"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v8i8")] - fn _vqshrun_n_s16(a: int16x8_t, n: int16x8_t) -> uint8x8_t; - } - unsafe { - _vqshrun_n_s16( - a, - const { - int16x8_t([ - -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, - -N as i16, - ]) - }, - ) - } +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(raddhn2) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch 
= "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vraddhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t { + let x = vraddhn_s16(b, c); + unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } } -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s32)"] +#[doc = "Rounding Add returning High Narrow (high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_s32)"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v4i16")] - fn _vqshrun_n_s32(a: int32x4_t, n: int32x4_t) -> uint16x4_t; - } +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(raddhn2) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vraddhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t { + let x = vraddhn_s32(b, c); + unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) } +} +#[doc = "Rounding Add returning High Narrow (high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_s64)"] +#[inline] 
+#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(raddhn2) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vraddhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t { + let x = vraddhn_s64(b, c); + unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) } +} +#[doc = "Rounding Add returning High Narrow (high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_u16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(raddhn2) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vraddhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t { unsafe { - _vqshrun_n_s32( - a, - const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, - ) + let x: uint8x8_t = transmute(vraddhn_s16(transmute(b), transmute(c))); + simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } } -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s64)"] +#[doc = "Rounding Add returning High Narrow (high half)."] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_u32)"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v2i32")] - fn _vqshrun_n_s64(a: int64x2_t, n: int64x2_t) -> uint32x2_t; +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(raddhn2) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vraddhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t { + unsafe { + let x: uint16x4_t = transmute(vraddhn_s32(transmute(b), transmute(c))); + simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) } - unsafe { _vqshrun_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }) } } -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s16)"] +#[doc = "Rounding Add returning High Narrow (high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_u64)"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqshrun_n_s16(a: 
int16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshrun.v8i8" - )] - fn _vqshrun_n_s16(a: int16x8_t, n: i32) -> uint8x8_t; +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(raddhn2) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vraddhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t { + unsafe { + let x: uint32x2_t = transmute(vraddhn_s64(transmute(b), transmute(c))); + simd_shuffle!(a, x, [0, 1, 2, 3]) } - unsafe { _vqshrun_n_s16(a, N) } } -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s32)"] +#[doc = "Rounding Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_s16)"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(raddhn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch 
= "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshrun.v4i16" + link_name = "llvm.aarch64.neon.raddhn.v8i8" )] - fn _vqshrun_n_s32(a: int32x4_t, n: i32) -> uint16x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v8i8")] + fn _vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t; } - unsafe { _vqshrun_n_s32(a, N) } + unsafe { _vraddhn_s16(a, b) } } -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s64)"] +#[doc = "Rounding Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_s32)"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(raddhn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshrun.v2i32" + link_name = "llvm.aarch64.neon.raddhn.v4i16" )] - fn _vqshrun_n_s64(a: int64x2_t, 
n: i32) -> uint32x2_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v4i16")] + fn _vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t; } - unsafe { _vqshrun_n_s64(a, N) } + unsafe { _vraddhn_s32(a, b) } } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s8)"] +#[doc = "Rounding Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_s64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqsub) + assert_instr(raddhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -35075,26 +42224,27 @@ pub fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { +pub fn vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.v8i8" + link_name = "llvm.aarch64.neon.raddhn.v2i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v8i8")] - fn _vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v2i32")] + fn _vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t; } - unsafe { _vqsub_s8(a, b) } + unsafe { _vraddhn_s64(a, b) } } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s8)"] +#[doc = "Rounding Add returning High Narrow."] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqsub) + assert_instr(raddhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -35104,26 +42254,19 @@ pub fn vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v16i8")] - fn _vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - unsafe { _vqsubq_s8(a, b) } +pub fn vraddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { + unsafe { transmute(vraddhn_s16(transmute(a), transmute(b))) } } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s16)"] +#[doc = "Rounding Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqsub) + assert_instr(raddhn) )] #[cfg_attr( not(target_arch = "arm"), @@ 
-35133,26 +42276,24 @@ pub fn vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v4i16")] - fn _vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; +pub fn vraddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { + let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; + let b: uint16x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) }; + unsafe { + let ret_val: uint8x8_t = transmute(vraddhn_s16(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - unsafe { _vqsub_s16(a, b) } } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s16)"] +#[doc = "Rounding Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqsub) + assert_instr(raddhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -35162,26 +42303,19 @@ pub fn vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = 
"aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v8i16")] - fn _vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - unsafe { _vqsubq_s16(a, b) } +pub fn vraddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { + unsafe { transmute(vraddhn_s32(transmute(a), transmute(b))) } } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s32)"] +#[doc = "Rounding Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqsub) + assert_instr(raddhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -35191,26 +42325,24 @@ pub fn vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v2i32")] - fn _vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; +pub fn vraddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { + let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; + let b: uint32x4_t = unsafe { simd_shuffle!(b, b, [3, 2, 1, 0]) }; + unsafe { + let ret_val: uint16x4_t = transmute(vraddhn_s32(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 
0]) } - unsafe { _vqsub_s32(a, b) } } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s32)"] +#[doc = "Rounding Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u64)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqsub) + assert_instr(raddhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -35220,26 +42352,19 @@ pub fn vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v4i32")] - fn _vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - unsafe { _vqsubq_s32(a, b) } +pub fn vraddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { + unsafe { transmute(vraddhn_s64(transmute(a), transmute(b))) } } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s64)"] +#[doc = "Rounding Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u64)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vqsub.s64"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqsub) + assert_instr(raddhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -35249,55 +42374,67 @@ pub fn vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { +pub fn vraddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { + let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; + let b: uint64x2_t = unsafe { simd_shuffle!(b, b, [1, 0]) }; + unsafe { + let ret_val: uint32x2_t = transmute(vraddhn_s64(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } +} +#[doc = "Reciprocal estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(frecpe) +)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vrecpe_f16(a: float16x4_t) -> float16x4_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v4f16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.v1i64" + link_name = "llvm.aarch64.neon.frecpe.v4f16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v1i64")] - fn _vqsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + fn _vrecpe_f16(a: float16x4_t) -> float16x4_t; } - unsafe { _vqsub_s64(a, b) } + unsafe { _vrecpe_f16(a) } } -#[doc = "Saturating subtract"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s64)"] +#[doc = "Reciprocal estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s64"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqsub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + assert_instr(frecpe) )] -pub fn vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vrecpeq_f16(a: float16x8_t) -> float16x8_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v8f16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.v2i64" + link_name = "llvm.aarch64.neon.frecpe.v8f16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v2i64")] - fn _vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + fn _vrecpeq_f16(a: float16x8_t) -> float16x8_t; } - unsafe { _vqsubq_s64(a, b) } + unsafe { _vrecpeq_f16(a) } } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u8)"] +#[doc = "Reciprocal estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqsub) + assert_instr(frecpe) )] #[cfg_attr( not(target_arch = "arm"), @@ -35307,26 +42444,26 @@ pub fn vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { +pub fn vrecpe_f32(a: float32x2_t) -> float32x2_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.v8i8" + link_name = "llvm.aarch64.neon.frecpe.v2f32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v8i8")] - fn _vqsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; + fn _vrecpe_f32(a: float32x2_t) -> float32x2_t; } - unsafe { _vqsub_u8(a, b) } + unsafe { _vrecpe_f32(a) } } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u8)"] +#[doc = "Reciprocal estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqsub) + assert_instr(frecpe) )] #[cfg_attr( not(target_arch = "arm"), @@ -35336,26 +42473,26 @@ pub fn vqsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = 
"111800") )] -pub fn vqsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { +pub fn vrecpeq_f32(a: float32x4_t) -> float32x4_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.v16i8" + link_name = "llvm.aarch64.neon.frecpe.v4f32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v16i8")] - fn _vqsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; + fn _vrecpeq_f32(a: float32x4_t) -> float32x4_t; } - unsafe { _vqsubq_u8(a, b) } + unsafe { _vrecpeq_f32(a) } } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u16)"] +#[doc = "Unsigned reciprocal estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqsub) + assert_instr(urecpe) )] #[cfg_attr( not(target_arch = "arm"), @@ -35365,26 +42502,26 @@ pub fn vqsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { +pub fn vrecpe_u32(a: uint32x2_t) -> uint32x2_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.v4i16" + link_name = "llvm.aarch64.neon.urecpe.v2i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v4i16")] - fn 
_vqsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; + fn _vrecpe_u32(a: uint32x2_t) -> uint32x2_t; } - unsafe { _vqsub_u16(a, b) } + unsafe { _vrecpe_u32(a) } } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u16)"] +#[doc = "Unsigned reciprocal estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqsub) + assert_instr(urecpe) )] #[cfg_attr( not(target_arch = "arm"), @@ -35394,84 +42531,70 @@ pub fn vqsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { +pub fn vrecpeq_u32(a: uint32x4_t) -> uint32x4_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.v8i16" + link_name = "llvm.aarch64.neon.urecpe.v4i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v8i16")] - fn _vqsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; + fn _vrecpeq_u32(a: uint32x4_t) -> uint32x4_t; } - unsafe { _vqsubq_u16(a, b) } + unsafe { _vrecpeq_u32(a) } } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u32)"] +#[doc = "Floating-point reciprocal step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f16)"] 
#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u32"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqsub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + assert_instr(frecps) )] -pub fn vqsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vrecps_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecps.v4f16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.v2i32" + link_name = "llvm.aarch64.neon.frecps.v4f16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v2i32")] - fn _vqsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; + fn _vrecps_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t; } - unsafe { _vqsub_u32(a, b) } + unsafe { _vrecps_f16(a, b) } } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u32)"] +#[doc = "Floating-point reciprocal step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u32"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(vrecps))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqsub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + assert_instr(frecps) )] -pub fn vqsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vrecpsq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecps.v8f16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.v4i32" + link_name = "llvm.aarch64.neon.frecps.v8f16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v4i32")] - fn _vqsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; + fn _vrecpsq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t; } - unsafe { _vqsubq_u32(a, b) } + unsafe { _vrecpsq_f16(a, b) } } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u64)"] +#[doc = "Floating-point reciprocal step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u64"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqsub) + assert_instr(frecps) )] #[cfg_attr( not(target_arch = "arm"), @@ -35481,26 +42604,26 @@ pub fn vqsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { +pub fn vrecps_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecps.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.v1i64" + link_name = "llvm.aarch64.neon.frecps.v2f32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v1i64")] - fn _vqsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t; + fn _vrecps_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; } - unsafe { _vqsub_u64(a, b) } + unsafe { _vrecps_f32(a, b) } } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u64)"] +#[doc = "Floating-point reciprocal step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u64"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqsub) + assert_instr(frecps) )] #[cfg_attr( not(target_arch = "arm"), @@ -35510,653 +42633,633 @@ pub fn vqsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { +pub fn vrecpsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecps.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.v2i64" + link_name = 
"llvm.aarch64.neon.frecps.v4f32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v2i64")] - fn _vqsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t; + fn _vrecpsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; } - unsafe { _vqsubq_u64(a, b) } + unsafe { _vrecpsq_f32(a, b) } } -#[doc = "Rounding Add returning High Narrow (high half)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f16)"] #[inline] -#[target_feature(enable = "neon")] +#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + assert_instr(nop) )] -pub fn vraddhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t { - let x = vraddhn_s16(b, c); - unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vreinterpret_f32_f16(a: float16x4_t) -> float32x2_t { + unsafe { transmute(a) } } -#[doc = "Rounding Add returning High Narrow (high half)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f16)"] #[inline] -#[target_feature(enable = "neon")] +#[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + assert_instr(nop) )] -pub fn vraddhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t { - let x = vraddhn_s32(b, c); - unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) } +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vreinterpret_f32_f16(a: float16x4_t) -> float32x2_t { + let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; + unsafe { + let ret_val: float32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } -#[doc = "Rounding Add returning High Narrow (high half)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f16)"] #[inline] -#[target_feature(enable = "neon")] +#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn2) -)] -#[cfg_attr( - not(target_arch = 
"arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + assert_instr(nop) )] -pub fn vraddhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t { - let x = vraddhn_s64(b, c); - unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) } +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vreinterpret_s8_f16(a: float16x4_t) -> int8x8_t { + unsafe { transmute(a) } } -#[doc = "Rounding Add returning High Narrow (high half)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f16)"] #[inline] -#[target_feature(enable = "neon")] +#[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + assert_instr(nop) )] -pub fn vraddhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t { +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vreinterpret_s8_f16(a: float16x4_t) -> int8x8_t { + let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { - let x: uint8x8_t = transmute(vraddhn_s16(transmute(b), transmute(c))); - simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) + let ret_val: 
int8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } -#[doc = "Rounding Add returning High Narrow (high half)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f16)"] #[inline] -#[target_feature(enable = "neon")] +#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + assert_instr(nop) )] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vreinterpret_s16_f16(a: float16x4_t) -> int16x4_t { + unsafe { transmute(a) } +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f16)"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) )] -pub fn vraddhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t { +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vreinterpret_s16_f16(a: float16x4_t) -> int16x4_t { + let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe 
{ - let x: uint16x4_t = transmute(vraddhn_s32(transmute(b), transmute(c))); - simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) + let ret_val: int16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } -#[doc = "Rounding Add returning High Narrow (high half)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f16)"] #[inline] -#[target_feature(enable = "neon")] +#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + assert_instr(nop) )] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vreinterpret_s32_f16(a: float16x4_t) -> int32x2_t { + unsafe { transmute(a) } +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f16)"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) )] -pub fn vraddhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t { +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn 
vreinterpret_s32_f16(a: float16x4_t) -> int32x2_t { + let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { - let x: uint32x2_t = transmute(vraddhn_s64(transmute(b), transmute(c))); - simd_shuffle!(a, x, [0, 1, 2, 3]) + let ret_val: int32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } } -#[doc = "Rounding Add returning High Narrow."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f16)"] #[inline] -#[target_feature(enable = "neon")] +#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn) + assert_instr(nop) )] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vreinterpret_s64_f16(a: float16x4_t) -> int64x1_t { + unsafe { transmute(a) } +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f16)"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) )] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vreinterpret_s64_f16(a: float16x4_t) -> int64x1_t { + let a: float16x4_t = unsafe { simd_shuffle!(a, a, 
[3, 2, 1, 0]) }; + unsafe { transmute(a) } +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f16)"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) )] -pub fn vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.raddhn.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v8i8")] - fn _vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t; - } - unsafe { _vraddhn_s16(a, b) } +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vreinterpret_u8_f16(a: float16x4_t) -> uint8x8_t { + unsafe { transmute(a) } } -#[doc = "Rounding Add returning High Narrow."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f16)"] #[inline] -#[target_feature(enable = "neon")] +#[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn) + assert_instr(nop) )] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] 
+pub fn vreinterpret_u8_f16(a: float16x4_t) -> uint8x8_t { + let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; + unsafe { + let ret_val: uint8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f16)"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) )] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vreinterpret_u16_f16(a: float16x4_t) -> uint16x4_t { + unsafe { transmute(a) } +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f16)"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) )] -pub fn vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.raddhn.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v4i16")] - fn _vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t; +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vreinterpret_u16_f16(a: 
float16x4_t) -> uint16x4_t { + let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; + unsafe { + let ret_val: uint16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - unsafe { _vraddhn_s32(a, b) } } -#[doc = "Rounding Add returning High Narrow."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f16)"] #[inline] -#[target_feature(enable = "neon")] +#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + assert_instr(nop) )] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vreinterpret_u32_f16(a: float16x4_t) -> uint32x2_t { + unsafe { transmute(a) } +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f16)"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) )] -pub fn vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = 
"llvm.aarch64.neon.raddhn.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v2i32")] - fn _vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t; +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vreinterpret_u32_f16(a: float16x4_t) -> uint32x2_t { + let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; + unsafe { + let ret_val: uint32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } - unsafe { _vraddhn_s64(a, b) } } -#[doc = "Rounding Add returning High Narrow."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f16)"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + assert_instr(nop) )] -pub fn vraddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { - unsafe { transmute(vraddhn_s16(transmute(a), transmute(b))) } +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vreinterpret_u64_f16(a: float16x4_t) -> uint64x1_t { + unsafe { transmute(a) } } -#[doc = "Rounding Add returning High Narrow."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u16)"] +#[doc = "Vector reinterpret 
cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f16)"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn) + assert_instr(nop) )] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vreinterpret_u64_f16(a: float16x4_t) -> uint64x1_t { + let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; + unsafe { transmute(a) } +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f16)"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) )] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vreinterpret_p8_f16(a: float16x4_t) -> poly8x8_t { + unsafe { transmute(a) } +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f16)"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = 
"aarch64", target_arch = "arm64ec")), + assert_instr(nop) )] -pub fn vraddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let b: uint16x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) }; +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vreinterpret_p8_f16(a: float16x4_t) -> poly8x8_t { + let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { - let ret_val: uint8x8_t = transmute(vraddhn_s16(transmute(a), transmute(b))); + let ret_val: poly8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } -#[doc = "Rounding Add returning High Narrow."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f16)"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + assert_instr(nop) )] -pub fn vraddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { - unsafe { transmute(vraddhn_s32(transmute(a), transmute(b))) } +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vreinterpret_p16_f16(a: float16x4_t) -> poly16x4_t { + unsafe { transmute(a) } } -#[doc = "Rounding Add 
returning High Narrow."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f16)"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + assert_instr(nop) )] -pub fn vraddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - let b: uint32x4_t = unsafe { simd_shuffle!(b, b, [3, 2, 1, 0]) }; +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vreinterpret_p16_f16(a: float16x4_t) -> poly16x4_t { + let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { - let ret_val: uint16x4_t = transmute(vraddhn_s32(transmute(a), transmute(b))); + let ret_val: poly16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } -#[doc = "Rounding Add returning High Narrow."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f16)"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + assert_instr(nop) )] -pub fn vraddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { - unsafe { transmute(vraddhn_s64(transmute(a), transmute(b))) } +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vreinterpretq_f32_f16(a: float16x8_t) -> float32x4_t { + unsafe { transmute(a) } } -#[doc = "Rounding Add returning High Narrow."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f16)"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + assert_instr(nop) )] -pub fn vraddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - let b: uint64x2_t = unsafe { simd_shuffle!(b, b, [1, 0]) }; +#[target_feature(enable = 
"neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vreinterpretq_f32_f16(a: float16x8_t) -> float32x4_t { + let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { - let ret_val: uint32x2_t = transmute(vraddhn_s64(transmute(a), transmute(b))); - simd_shuffle!(ret_val, ret_val, [1, 0]) + let ret_val: float32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } -#[doc = "Reciprocal estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f16)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] +#[cfg(target_endian = "little")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frecpe) + assert_instr(nop) )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vrecpe_f16(a: float16x4_t) -> float16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v4f16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecpe.v4f16" - )] - fn _vrecpe_f16(a: float16x4_t) -> float16x4_t; - } - unsafe { _vrecpe_f16(a) } +pub fn vreinterpretq_s8_f16(a: float16x8_t) -> int8x16_t { + unsafe { transmute(a) } } -#[doc = "Reciprocal estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f16)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] +#[cfg(target_endian = "big")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frecpe) + assert_instr(nop) )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vrecpeq_f16(a: float16x8_t) -> float16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v8f16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecpe.v8f16" - )] - fn _vrecpeq_f16(a: float16x8_t) -> float16x8_t; +pub fn vreinterpretq_s8_f16(a: float16x8_t) -> int8x16_t { + let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; + unsafe { + let ret_val: int8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } - unsafe { _vrecpeq_f16(a) } } -#[doc = "Reciprocal estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f16)"] #[inline] -#[target_feature(enable = "neon")] +#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frecpe) -)] 
-#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + assert_instr(nop) )] -pub fn vrecpe_f32(a: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecpe.v2f32" - )] - fn _vrecpe_f32(a: float32x2_t) -> float32x2_t; - } - unsafe { _vrecpe_f32(a) } +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vreinterpretq_s16_f16(a: float16x8_t) -> int16x8_t { + unsafe { transmute(a) } } -#[doc = "Reciprocal estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f16)"] #[inline] -#[target_feature(enable = "neon")] +#[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frecpe) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + assert_instr(nop) )] -pub fn vrecpeq_f32(a: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecpe.v4f32" - )] - fn 
_vrecpeq_f32(a: float32x4_t) -> float32x4_t; +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vreinterpretq_s16_f16(a: float16x8_t) -> int16x8_t { + let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; + unsafe { + let ret_val: int16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - unsafe { _vrecpeq_f32(a) } } -#[doc = "Unsigned reciprocal estimate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f16)"] #[inline] -#[target_feature(enable = "neon")] +#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urecpe) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + assert_instr(nop) )] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vreinterpretq_s32_f16(a: float16x8_t) -> int32x4_t { + unsafe { transmute(a) } +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f16)"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) )] -pub fn 
vrecpe_u32(a: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urecpe.v2i32" - )] - fn _vrecpe_u32(a: uint32x2_t) -> uint32x2_t; +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vreinterpretq_s32_f16(a: float16x8_t) -> int32x4_t { + let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; + unsafe { + let ret_val: int32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - unsafe { _vrecpe_u32(a) } } -#[doc = "Unsigned reciprocal estimate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f16)"] #[inline] -#[target_feature(enable = "neon")] +#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urecpe) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + assert_instr(nop) )] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vreinterpretq_s64_f16(a: float16x8_t) -> int64x2_t { + unsafe { transmute(a) } +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f16)"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) )] -pub fn vrecpeq_u32(a: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urecpe.v4i32" - )] - fn _vrecpeq_u32(a: uint32x4_t) -> uint32x4_t; +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vreinterpretq_s64_f16(a: float16x8_t) -> int64x2_t { + let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; + unsafe { + let ret_val: int64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } - unsafe { _vrecpeq_u32(a) } } -#[doc = "Floating-point reciprocal step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f16)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))] +#[cfg(target_endian = "little")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frecps) + assert_instr(nop) )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vrecps_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecps.v4f16")] - 
#[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecps.v4f16" - )] - fn _vrecps_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t; - } - unsafe { _vrecps_f16(a, b) } +pub fn vreinterpretq_u8_f16(a: float16x8_t) -> uint8x16_t { + unsafe { transmute(a) } } -#[doc = "Floating-point reciprocal step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f16)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))] +#[cfg(target_endian = "big")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frecps) + assert_instr(nop) )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vrecpsq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecps.v8f16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecps.v8f16" - )] - fn _vrecpsq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t; +pub fn vreinterpretq_u8_f16(a: float16x8_t) -> uint8x16_t { + let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; + unsafe { + let ret_val: uint8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } - unsafe { _vrecpsq_f16(a, b) } } -#[doc = "Floating-point reciprocal step"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f16)"] #[inline] -#[target_feature(enable = "neon")] +#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frecps) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + assert_instr(nop) )] -pub fn vrecps_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecps.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecps.v2f32" - )] - fn _vrecps_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - unsafe { _vrecps_f32(a, b) } +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vreinterpretq_u16_f16(a: float16x8_t) -> uint16x8_t { + unsafe { transmute(a) } } -#[doc = "Floating-point reciprocal step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f16)"] #[inline] -#[target_feature(enable = "neon")] +#[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vrecps))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frecps) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + assert_instr(nop) )] -pub fn vrecpsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecps.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecps.v4f32" - )] - fn _vrecpsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vreinterpretq_u16_f16(a: float16x8_t) -> uint16x8_t { + let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; + unsafe { + let ret_val: uint16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - unsafe { _vrecpsq_f32(a, b) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f16)"] #[inline] #[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36167,11 +43270,11 @@ pub fn vrecpsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f32_f16(a: float16x4_t) -> float32x2_t { +pub fn vreinterpretq_u32_f16(a: float16x8_t) -> uint32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f16)"] #[inline] #[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36182,15 +43285,15 @@ pub fn vreinterpret_f32_f16(a: float16x4_t) -> float32x2_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f32_f16(a: float16x4_t) -> float32x2_t { - let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_u32_f16(a: float16x8_t) -> uint32x4_t { + let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { - let ret_val: float32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) + let ret_val: uint32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f16)"] #[inline] #[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36201,11 +43304,11 @@ pub fn vreinterpret_f32_f16(a: float16x4_t) -> float32x2_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_s8_f16(a: float16x4_t) -> int8x8_t { +pub fn vreinterpretq_u64_f16(a: float16x8_t) -> uint64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f16)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f16)"] #[inline] #[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36216,15 +43319,15 @@ pub fn vreinterpret_s8_f16(a: float16x4_t) -> int8x8_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_s8_f16(a: float16x4_t) -> int8x8_t { - let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_u64_f16(a: float16x8_t) -> uint64x2_t { + let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { - let ret_val: int8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + let ret_val: uint64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f16)"] #[inline] #[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36235,11 +43338,11 @@ pub fn vreinterpret_s8_f16(a: float16x4_t) -> int8x8_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_s16_f16(a: float16x4_t) -> int16x4_t { +pub fn vreinterpretq_p8_f16(a: float16x8_t) -> poly8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f16)"] #[inline] #[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ 
-36250,15 +43353,19 @@ pub fn vreinterpret_s16_f16(a: float16x4_t) -> int16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_s16_f16(a: float16x4_t) -> int16x4_t { - let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_p8_f16(a: float16x8_t) -> poly8x16_t { + let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + let ret_val: poly8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f16)"] #[inline] #[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36269,11 +43376,11 @@ pub fn vreinterpret_s16_f16(a: float16x4_t) -> int16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_s32_f16(a: float16x4_t) -> int32x2_t { +pub fn vreinterpretq_p16_f16(a: float16x8_t) -> poly16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f16)"] #[inline] #[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36284,15 +43391,15 @@ pub fn vreinterpret_s32_f16(a: float16x4_t) -> int32x2_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = 
"stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_s32_f16(a: float16x4_t) -> int32x2_t { - let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_p16_f16(a: float16x8_t) -> poly16x8_t { + let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { - let ret_val: int32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) + let ret_val: poly16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f32)"] #[inline] #[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36303,11 +43410,11 @@ pub fn vreinterpret_s32_f16(a: float16x4_t) -> int32x2_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_s64_f16(a: float16x4_t) -> int64x1_t { +pub fn vreinterpret_f16_f32(a: float32x2_t) -> float16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f32)"] #[inline] #[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36318,12 +43425,15 @@ pub fn vreinterpret_s64_f16(a: float16x4_t) -> int64x1_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_s64_f16(a: float16x4_t) -> int64x1_t { - let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { transmute(a) } +pub fn 
vreinterpret_f16_f32(a: float32x2_t) -> float16x4_t { + let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; + unsafe { + let ret_val: float16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f32)"] #[inline] #[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36334,11 +43444,11 @@ pub fn vreinterpret_s64_f16(a: float16x4_t) -> int64x1_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_u8_f16(a: float16x4_t) -> uint8x8_t { +pub fn vreinterpretq_f16_f32(a: float32x4_t) -> float16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f32)"] #[inline] #[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36349,15 +43459,15 @@ pub fn vreinterpret_u8_f16(a: float16x4_t) -> uint8x8_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_u8_f16(a: float16x4_t) -> uint8x8_t { - let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_f16_f32(a: float32x4_t) -> float16x8_t { + let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { - let ret_val: uint8x8_t = transmute(a); + let ret_val: float16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast 
operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s8)"] #[inline] #[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36368,11 +43478,11 @@ pub fn vreinterpret_u8_f16(a: float16x4_t) -> uint8x8_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_u16_f16(a: float16x4_t) -> uint16x4_t { +pub fn vreinterpret_f16_s8(a: int8x8_t) -> float16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s8)"] #[inline] #[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36383,15 +43493,15 @@ pub fn vreinterpret_u16_f16(a: float16x4_t) -> uint16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_u16_f16(a: float16x4_t) -> uint16x4_t { - let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpret_f16_s8(a: int8x8_t) -> float16x4_t { + let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { - let ret_val: uint16x4_t = transmute(a); + let ret_val: float16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s8)"] #[inline] 
#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36402,11 +43512,11 @@ pub fn vreinterpret_u16_f16(a: float16x4_t) -> uint16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_u32_f16(a: float16x4_t) -> uint32x2_t { +pub fn vreinterpretq_f16_s8(a: int8x16_t) -> float16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s8)"] #[inline] #[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36417,15 +43527,16 @@ pub fn vreinterpret_u32_f16(a: float16x4_t) -> uint32x2_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_u32_f16(a: float16x4_t) -> uint32x2_t { - let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_f16_s8(a: int8x16_t) -> float16x8_t { + let a: int8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) + let ret_val: float16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s16)"] #[inline] #[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36436,11 +43547,11 @@ pub fn vreinterpret_u32_f16(a: float16x4_t) -> 
uint32x2_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_u64_f16(a: float16x4_t) -> uint64x1_t { +pub fn vreinterpret_f16_s16(a: int16x4_t) -> float16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s16)"] #[inline] #[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36451,12 +43562,15 @@ pub fn vreinterpret_u64_f16(a: float16x4_t) -> uint64x1_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_u64_f16(a: float16x4_t) -> uint64x1_t { - let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { transmute(a) } +pub fn vreinterpret_f16_s16(a: int16x4_t) -> float16x4_t { + let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; + unsafe { + let ret_val: float16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s16)"] #[inline] #[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36467,11 +43581,11 @@ pub fn vreinterpret_u64_f16(a: float16x4_t) -> uint64x1_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_p8_f16(a: float16x4_t) -> poly8x8_t { +pub fn vreinterpretq_f16_s16(a: int16x8_t) -> float16x8_t { unsafe { transmute(a) } } #[doc = "Vector 
reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s16)"] #[inline] #[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36482,15 +43596,15 @@ pub fn vreinterpret_p8_f16(a: float16x4_t) -> poly8x8_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_p8_f16(a: float16x4_t) -> poly8x8_t { - let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_f16_s16(a: int16x8_t) -> float16x8_t { + let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { - let ret_val: poly8x8_t = transmute(a); + let ret_val: float16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s32)"] #[inline] #[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36501,11 +43615,11 @@ pub fn vreinterpret_p8_f16(a: float16x4_t) -> poly8x8_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_p16_f16(a: float16x4_t) -> poly16x4_t { +pub fn vreinterpret_f16_s32(a: int32x2_t) -> float16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f16)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s32)"] #[inline] #[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36516,15 +43630,15 @@ pub fn vreinterpret_p16_f16(a: float16x4_t) -> poly16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_p16_f16(a: float16x4_t) -> poly16x4_t { - let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpret_f16_s32(a: int32x2_t) -> float16x4_t { + let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { - let ret_val: poly16x4_t = transmute(a); + let ret_val: float16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s32)"] #[inline] #[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36535,11 +43649,11 @@ pub fn vreinterpret_p16_f16(a: float16x4_t) -> poly16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f32_f16(a: float16x8_t) -> float32x4_t { +pub fn vreinterpretq_f16_s32(a: int32x4_t) -> float16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s32)"] #[inline] #[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36550,15 +43664,15 @@ pub fn vreinterpretq_f32_f16(a: 
float16x8_t) -> float32x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f32_f16(a: float16x8_t) -> float32x4_t { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_f16_s32(a: int32x4_t) -> float16x8_t { + let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { - let ret_val: float32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + let ret_val: float16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s64)"] #[inline] #[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36569,11 +43683,11 @@ pub fn vreinterpretq_f32_f16(a: float16x8_t) -> float32x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_s8_f16(a: float16x8_t) -> int8x16_t { +pub fn vreinterpret_f16_s64(a: int64x1_t) -> float16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s64)"] #[inline] #[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36584,19 +43698,14 @@ pub fn vreinterpretq_s8_f16(a: float16x8_t) -> int8x16_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_s8_f16(a: float16x8_t) -> int8x16_t { - let 
a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_f16_s64(a: int64x1_t) -> float16x4_t { unsafe { - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) + let ret_val: float16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s64)"] #[inline] #[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36607,11 +43716,11 @@ pub fn vreinterpretq_s8_f16(a: float16x8_t) -> int8x16_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_s16_f16(a: float16x8_t) -> int16x8_t { +pub fn vreinterpretq_f16_s64(a: int64x2_t) -> float16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s64)"] #[inline] #[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36622,15 +43731,15 @@ pub fn vreinterpretq_s16_f16(a: float16x8_t) -> int16x8_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_s16_f16(a: float16x8_t) -> int16x8_t { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_f16_s64(a: int64x2_t) -> float16x8_t { + let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { - let ret_val: 
int16x8_t = transmute(a); + let ret_val: float16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u8)"] #[inline] #[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36641,11 +43750,11 @@ pub fn vreinterpretq_s16_f16(a: float16x8_t) -> int16x8_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_s32_f16(a: float16x8_t) -> int32x4_t { +pub fn vreinterpret_f16_u8(a: uint8x8_t) -> float16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u8)"] #[inline] #[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36656,15 +43765,15 @@ pub fn vreinterpretq_s32_f16(a: float16x8_t) -> int32x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_s32_f16(a: float16x8_t) -> int32x4_t { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_f16_u8(a: uint8x8_t) -> float16x4_t { + let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { - let ret_val: int32x4_t = transmute(a); + let ret_val: float16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u8)"] #[inline] #[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36675,11 +43784,11 @@ pub fn vreinterpretq_s32_f16(a: float16x8_t) -> int32x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_s64_f16(a: float16x8_t) -> int64x2_t { +pub fn vreinterpretq_f16_u8(a: uint8x16_t) -> float16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u8)"] #[inline] #[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36690,15 +43799,16 @@ pub fn vreinterpretq_s64_f16(a: float16x8_t) -> int64x2_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_s64_f16(a: float16x8_t) -> int64x2_t { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_f16_u8(a: uint8x16_t) -> float16x8_t { + let a: uint8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { - let ret_val: int64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) + let ret_val: float16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f16)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u16)"] #[inline] #[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36709,11 +43819,11 @@ pub fn vreinterpretq_s64_f16(a: float16x8_t) -> int64x2_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_u8_f16(a: float16x8_t) -> uint8x16_t { +pub fn vreinterpret_f16_u16(a: uint16x4_t) -> float16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u16)"] #[inline] #[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36724,19 +43834,15 @@ pub fn vreinterpretq_u8_f16(a: float16x8_t) -> uint8x16_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_u8_f16(a: float16x8_t) -> uint8x16_t { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_f16_u16(a: uint16x4_t) -> float16x4_t { + let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) + let ret_val: float16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u16)"] #[inline] #[cfg(target_endian = "little")] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36747,11 +43853,11 @@ pub fn vreinterpretq_u8_f16(a: float16x8_t) -> uint8x16_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_u16_f16(a: float16x8_t) -> uint16x8_t { +pub fn vreinterpretq_f16_u16(a: uint16x8_t) -> float16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u16)"] #[inline] #[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36762,15 +43868,15 @@ pub fn vreinterpretq_u16_f16(a: float16x8_t) -> uint16x8_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_u16_f16(a: float16x8_t) -> uint16x8_t { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_f16_u16(a: uint16x8_t) -> float16x8_t { + let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { - let ret_val: uint16x8_t = transmute(a); + let ret_val: float16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u32)"] #[inline] #[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36781,11 +43887,11 @@ pub fn vreinterpretq_u16_f16(a: float16x8_t) -> uint16x8_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = 
"stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_u32_f16(a: float16x8_t) -> uint32x4_t { +pub fn vreinterpret_f16_u32(a: uint32x2_t) -> float16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u32)"] #[inline] #[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36796,15 +43902,15 @@ pub fn vreinterpretq_u32_f16(a: float16x8_t) -> uint32x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_u32_f16(a: float16x8_t) -> uint32x4_t { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_f16_u32(a: uint32x2_t) -> float16x4_t { + let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { - let ret_val: uint32x4_t = transmute(a); + let ret_val: float16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u32)"] #[inline] #[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36815,11 +43921,11 @@ pub fn vreinterpretq_u32_f16(a: float16x8_t) -> uint32x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_u64_f16(a: float16x8_t) -> uint64x2_t { +pub fn vreinterpretq_f16_u32(a: uint32x4_t) -> float16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u32)"] #[inline] #[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36830,15 +43936,15 @@ pub fn vreinterpretq_u64_f16(a: float16x8_t) -> uint64x2_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_u64_f16(a: float16x8_t) -> uint64x2_t { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_f16_u32(a: uint32x4_t) -> float16x8_t { + let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { - let ret_val: uint64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) + let ret_val: float16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u64)"] #[inline] #[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36849,11 +43955,11 @@ pub fn vreinterpretq_u64_f16(a: float16x8_t) -> uint64x2_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_p8_f16(a: float16x8_t) -> poly8x16_t { +pub fn vreinterpret_f16_u64(a: uint64x1_t) -> float16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f16)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u64)"] #[inline] #[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36864,19 +43970,14 @@ pub fn vreinterpretq_p8_f16(a: float16x8_t) -> poly8x16_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_p8_f16(a: float16x8_t) -> poly8x16_t { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_f16_u64(a: uint64x1_t) -> float16x4_t { unsafe { - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) + let ret_val: float16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u64)"] #[inline] #[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36887,11 +43988,11 @@ pub fn vreinterpretq_p8_f16(a: float16x8_t) -> poly8x16_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_p16_f16(a: float16x8_t) -> poly16x8_t { +pub fn vreinterpretq_f16_u64(a: uint64x2_t) -> float16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u64)"] #[inline] #[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ 
-36902,15 +44003,15 @@ pub fn vreinterpretq_p16_f16(a: float16x8_t) -> poly16x8_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_p16_f16(a: float16x8_t) -> poly16x8_t { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_f16_u64(a: uint64x2_t) -> float16x8_t { + let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { - let ret_val: poly16x8_t = transmute(a); + let ret_val: float16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_p8)"] #[inline] #[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36921,11 +44022,11 @@ pub fn vreinterpretq_p16_f16(a: float16x8_t) -> poly16x8_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_f32(a: float32x2_t) -> float16x4_t { +pub fn vreinterpret_f16_p8(a: poly8x8_t) -> float16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_p8)"] #[inline] #[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36936,15 +44037,15 @@ pub fn vreinterpret_f16_f32(a: float32x2_t) -> float16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_f32(a: float32x2_t) -> float16x4_t { - let 
a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpret_f16_p8(a: poly8x8_t) -> float16x4_t { + let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: float16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p8)"] #[inline] #[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36955,11 +44056,11 @@ pub fn vreinterpret_f16_f32(a: float32x2_t) -> float16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_f32(a: float32x4_t) -> float16x8_t { +pub fn vreinterpretq_f16_p8(a: poly8x16_t) -> float16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p8)"] #[inline] #[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36970,15 +44071,16 @@ pub fn vreinterpretq_f16_f32(a: float32x4_t) -> float16x8_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_f32(a: float32x4_t) -> float16x8_t { - let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_f16_p8(a: poly8x16_t) -> float16x8_t { + let a: poly8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: float16x8_t = transmute(a); 
simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_p16)"] #[inline] #[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -36989,11 +44091,11 @@ pub fn vreinterpretq_f16_f32(a: float32x4_t) -> float16x8_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_s8(a: int8x8_t) -> float16x4_t { +pub fn vreinterpret_f16_p16(a: poly16x4_t) -> float16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_p16)"] #[inline] #[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -37004,15 +44106,15 @@ pub fn vreinterpret_f16_s8(a: int8x8_t) -> float16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_s8(a: int8x8_t) -> float16x4_t { - let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_f16_p16(a: poly16x4_t) -> float16x4_t { + let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { let ret_val: float16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s8)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p16)"] #[inline] #[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -37023,11 +44125,11 @@ pub fn vreinterpret_f16_s8(a: int8x8_t) -> float16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_s8(a: int8x16_t) -> float16x8_t { +pub fn vreinterpretq_f16_p16(a: poly16x8_t) -> float16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p16)"] #[inline] #[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -37038,19 +44140,18 @@ pub fn vreinterpretq_f16_s8(a: int8x16_t) -> float16x8_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_s8(a: int8x16_t) -> float16x8_t { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_f16_p16(a: poly16x8_t) -> float16x8_t { + let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: float16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p128)"] #[inline] #[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(target_arch = "arm", 
target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -37058,14 +44159,14 @@ pub fn vreinterpretq_f16_s8(a: int8x16_t) -> float16x8_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_s16(a: int16x4_t) -> float16x4_t { +pub fn vreinterpretq_f16_p128(a: p128) -> float16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p128)"] #[inline] #[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -37073,18 +44174,17 @@ pub fn vreinterpret_f16_s16(a: int16x4_t) -> float16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_s16(a: int16x4_t) -> float16x4_t { - let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_f16_p128(a: p128) -> float16x8_t { unsafe { - let ret_val: float16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + let ret_val: float16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f16)"] #[inline] 
#[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -37092,14 +44192,14 @@ pub fn vreinterpret_f16_s16(a: int16x4_t) -> float16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_s16(a: int16x8_t) -> float16x8_t { +pub fn vreinterpret_p64_f16(a: float16x4_t) -> poly64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f16)"] #[inline] #[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -37107,18 +44207,80 @@ pub fn vreinterpretq_f16_s16(a: int16x8_t) -> float16x8_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_s16(a: int16x8_t) -> float16x8_t { - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_p64_f16(a: float16x4_t) -> poly64x1_t { + let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; + unsafe { transmute(a) } +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f16)"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg_attr(target_arch = "arm", 
target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vreinterpretq_p128_f16(a: float16x8_t) -> p128 { + unsafe { transmute(a) } +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f16)"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vreinterpretq_p128_f16(a: float16x8_t) -> p128 { + let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; + unsafe { transmute(a) } +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f16)"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vreinterpretq_p64_f16(a: float16x8_t) -> poly64x2_t { + unsafe { transmute(a) } +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f16)"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg_attr(target_arch = "arm", 
target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +pub fn vreinterpretq_p64_f16(a: float16x8_t) -> poly64x2_t { + let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { - let ret_val: float16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + let ret_val: poly64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_p64)"] #[inline] #[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -37126,14 +44288,14 @@ pub fn vreinterpretq_f16_s16(a: int16x8_t) -> float16x8_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_s32(a: int32x2_t) -> float16x4_t { +pub fn vreinterpret_f16_p64(a: poly64x1_t) -> float16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_p64)"] #[inline] #[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -37141,18 +44303,17 @@ pub fn vreinterpret_f16_s32(a: int32x2_t) -> float16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_s32(a: int32x2_t) -> float16x4_t { - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpret_f16_p64(a: poly64x1_t) -> float16x4_t { unsafe { let ret_val: float16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p64)"] #[inline] #[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -37160,14 +44321,14 @@ pub fn vreinterpret_f16_s32(a: int32x2_t) -> float16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_s32(a: int32x4_t) -> float16x8_t { +pub fn vreinterpretq_f16_p64(a: poly64x2_t) -> float16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p64)"] #[inline] #[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -37175,687 +44336,973 @@ pub fn vreinterpretq_f16_s32(a: int32x4_t) -> float16x8_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_s32(a: int32x4_t) -> float16x8_t { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_f16_p64(a: poly64x2_t) -> float16x8_t { + let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { let ret_val: float16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p128)"] #[inline] #[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_s64(a: int64x1_t) -> float16x4_t { +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpretq_f32_p128(a: p128) -> float32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p128)"] #[inline] #[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_s64(a: int64x1_t) -> float16x4_t { +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpretq_f32_p128(a: p128) -> float32x4_t { unsafe { - let ret_val: float16x4_t = transmute(a); + let ret_val: float32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f32)"] #[inline] #[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_s64(a: int64x2_t) -> float16x8_t { +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch 
= "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpret_s8_f32(a: float32x2_t) -> int8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f32)"] #[inline] #[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_s64(a: int64x2_t) -> float16x8_t { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpret_s8_f32(a: float32x2_t) -> int8x8_t { + let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { - let ret_val: float16x8_t = transmute(a); + let ret_val: int8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f32)"] #[inline] #[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, 
any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_u8(a: uint8x8_t) -> float16x4_t { +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f32)"] #[inline] #[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_u8(a: uint8x8_t) -> float16x4_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t { + let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { - let ret_val: float16x4_t = transmute(a); + let ret_val: int16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f32)"] #[inline] #[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_u8(a: uint8x16_t) -> float16x8_t { +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpret_s32_f32(a: float32x2_t) -> int32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f32)"] #[inline] #[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_u8(a: uint8x16_t) -> float16x8_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = 
"arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpret_s32_f32(a: float32x2_t) -> int32x2_t { + let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { - let ret_val: float16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + let ret_val: int32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f32)"] #[inline] #[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_u16(a: uint16x4_t) -> float16x4_t { +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f32)"] #[inline] #[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = 
"aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_u16(a: uint16x4_t) -> float16x4_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: float16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t { + let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f32)"] #[inline] #[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_u16(a: uint16x8_t) -> float16x8_t { +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpret_u8_f32(a: float32x2_t) -> uint8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u16)"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f32)"] #[inline] #[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_u16(a: uint16x8_t) -> float16x8_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpret_u8_f32(a: float32x2_t) -> uint8x8_t { + let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { - let ret_val: float16x8_t = transmute(a); + let ret_val: uint8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f32)"] #[inline] #[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_u32(a: uint32x2_t) -> float16x4_t { +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = 
"1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpret_u16_f32(a: float32x2_t) -> uint16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f32)"] #[inline] #[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_u32(a: uint32x2_t) -> float16x4_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpret_u16_f32(a: float32x2_t) -> uint16x4_t { + let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { - let ret_val: float16x4_t = transmute(a); + let ret_val: uint16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f32)"] #[inline] #[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_u32(a: uint32x4_t) -> float16x8_t { +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpret_u32_f32(a: float32x2_t) -> uint32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f32)"] #[inline] #[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_u32(a: uint32x4_t) -> float16x8_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpret_u32_f32(a: float32x2_t) -> uint32x2_t { + let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { - let ret_val: float16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + let ret_val: uint32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f32)"] #[inline] #[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_u64(a: uint64x1_t) -> float16x4_t { +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f32)"] #[inline] #[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_u64(a: uint64x1_t) -> float16x4_t { - unsafe { - let ret_val: float16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = 
"arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t { + let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f32)"] #[inline] #[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_u64(a: uint64x2_t) -> float16x8_t { +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f32)"] #[inline] #[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn 
vreinterpretq_f16_u64(a: uint64x2_t) -> float16x8_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t { + let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { - let ret_val: float16x8_t = transmute(a); + let ret_val: poly8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f32)"] #[inline] #[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_p8(a: poly8x8_t) -> float16x4_t { +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpret_p16_f32(a: float32x2_t) -> poly16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f32)"] #[inline] #[cfg(target_endian = "big")] 
+#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_p8(a: poly8x8_t) -> float16x4_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpret_p16_f32(a: float32x2_t) -> poly16x4_t { + let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { - let ret_val: float16x4_t = transmute(a); + let ret_val: poly16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f32)"] #[inline] #[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_p8(a: poly8x16_t) -> float16x8_t { +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpretq_p128_f32(a: 
float32x4_t) -> p128 { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f32)"] #[inline] #[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_p8(a: poly8x16_t) -> float16x8_t { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: float16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpretq_p128_f32(a: float32x4_t) -> p128 { + let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f32)"] #[inline] #[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) 
)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_p16(a: poly16x4_t) -> float16x4_t { +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f32)"] #[inline] #[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_p16(a: poly16x4_t) -> float16x4_t { - let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t { + let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { - let ret_val: float16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + let ret_val: int8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f32)"] #[inline] #[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_p16(a: poly16x8_t) -> float16x8_t { +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f32)"] #[inline] #[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_p16(a: poly16x8_t) -> float16x8_t { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t { + let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { - let ret_val: float16x8_t = transmute(a); + let ret_val: int16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f32)"] #[inline] #[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_p128(a: p128) -> float16x8_t { +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpretq_s32_f32(a: float32x4_t) -> int32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f32)"] #[inline] #[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = 
"arm"), assert_instr(nop))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_p128(a: p128) -> float16x8_t { +pub fn vreinterpretq_s32_f32(a: float32x4_t) -> int32x4_t { + let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { - let ret_val: float16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + let ret_val: int32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f32)"] #[inline] #[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_p64_f16(a: float16x4_t) -> poly64x1_t { +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpretq_s64_f32(a: 
float32x4_t) -> int64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f32)"] #[inline] #[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_p64_f16(a: float16x4_t) -> poly64x1_t { - let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { transmute(a) } +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpretq_s64_f32(a: float32x4_t) -> int64x2_t { + let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; + unsafe { + let ret_val: int64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f32)"] #[inline] #[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( 
all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_p128_f16(a: float16x8_t) -> p128 { +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f32)"] #[inline] #[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_p128_f16(a: float16x8_t) -> p128 { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { transmute(a) } +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t { + let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; + unsafe { + let ret_val: uint8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } } #[doc = "Vector 
reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f32)"] #[inline] #[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_p64_f16(a: float16x8_t) -> poly64x2_t { +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f32)"] #[inline] #[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_p64_f16(a: float16x8_t) -> poly64x2_t { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 
5, 4, 3, 2, 1, 0]) }; +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t { + let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) + let ret_val: uint16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f32)"] #[inline] #[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_p64(a: poly64x1_t) -> float16x4_t { +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpretq_u32_f32(a: float32x4_t) -> uint32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f32)"] #[inline] 
#[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_p64(a: poly64x1_t) -> float16x4_t { +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpretq_u32_f32(a: float32x4_t) -> uint32x4_t { + let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { - let ret_val: float16x4_t = transmute(a); + let ret_val: uint32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f32)"] #[inline] #[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_p64(a: poly64x2_t) -> float16x8_t { +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f32)"] #[inline] #[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_p64(a: poly64x2_t) -> float16x8_t { - let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t { + let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { - let ret_val: float16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + let ret_val: uint64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -37873,11 +45320,11 @@ pub fn 
vreinterpretq_f16_p64(a: poly64x2_t) -> float16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_p128(a: p128) -> float32x4_t { +pub fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -37895,14 +45342,19 @@ pub fn vreinterpretq_f32_p128(a: p128) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_p128(a: p128) -> float32x4_t { +pub fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t { + let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { - let ret_val: float32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + let ret_val: poly8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -37920,11 +45372,11 @@ pub fn vreinterpretq_f32_p128(a: p128) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_f32(a: float32x2_t) -> int8x8_t { +pub fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast 
operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -37942,15 +45394,15 @@ pub fn vreinterpret_s8_f32(a: float32x2_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_f32(a: float32x2_t) -> int8x8_t { - let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t { + let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { - let ret_val: int8x8_t = transmute(a); + let ret_val: poly16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -37968,11 +45420,11 @@ pub fn vreinterpret_s8_f32(a: float32x2_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t { +pub fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -37990,15 +45442,15 @@ 
pub fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t { - let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t { + let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + let ret_val: float32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -38016,11 +45468,11 @@ pub fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_f32(a: float32x2_t) -> int32x2_t { +pub fn vreinterpret_s16_s8(a: int8x8_t) -> int16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -38038,15 +45490,15 @@ pub fn vreinterpret_s32_f32(a: float32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_f32(a: float32x2_t) -> int32x2_t { - let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpret_s16_s8(a: int8x8_t) -> 
int16x4_t { + let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { - let ret_val: int32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) + let ret_val: int16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -38064,11 +45516,11 @@ pub fn vreinterpret_s32_f32(a: float32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t { +pub fn vreinterpret_s32_s8(a: int8x8_t) -> int32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -38086,12 +45538,15 @@ pub fn vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t { - let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { transmute(a) } +pub fn vreinterpret_s32_s8(a: int8x8_t) -> int32x2_t { + let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; + unsafe { + let ret_val: int32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -38109,11 +45564,11 @@ pub fn vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_f32(a: float32x2_t) -> uint8x8_t { +pub fn vreinterpret_s64_s8(a: int8x8_t) -> int64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -38131,15 +45586,12 @@ pub fn vreinterpret_u8_f32(a: float32x2_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_f32(a: float32x2_t) -> uint8x8_t { - let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: uint8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpret_s64_s8(a: int8x8_t) -> int64x1_t { + let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -38157,11 +45609,11 @@ pub fn vreinterpret_u8_f32(a: 
float32x2_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_f32(a: float32x2_t) -> uint16x4_t { +pub fn vreinterpret_u8_s8(a: int8x8_t) -> uint8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -38179,15 +45631,15 @@ pub fn vreinterpret_u16_f32(a: float32x2_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_f32(a: float32x2_t) -> uint16x4_t { - let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpret_u8_s8(a: int8x8_t) -> uint8x8_t { + let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + let ret_val: uint8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -38205,11 +45657,11 @@ pub fn vreinterpret_u16_f32(a: float32x2_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_f32(a: float32x2_t) -> uint32x2_t { +pub fn vreinterpret_u16_s8(a: int8x8_t) -> uint16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast 
operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -38227,15 +45679,15 @@ pub fn vreinterpret_u32_f32(a: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_f32(a: float32x2_t) -> uint32x2_t { - let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpret_u16_s8(a: int8x8_t) -> uint16x4_t { + let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) + let ret_val: uint16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -38253,11 +45705,11 @@ pub fn vreinterpret_u32_f32(a: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t { +pub fn vreinterpret_u32_s8(a: int8x8_t) -> uint32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s8)"] #[inline] #[cfg(target_endian = "big")] 
#[target_feature(enable = "neon")] @@ -38275,12 +45727,15 @@ pub fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t { - let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { transmute(a) } +pub fn vreinterpret_u32_s8(a: int8x8_t) -> uint32x2_t { + let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; + unsafe { + let ret_val: uint32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -38298,11 +45753,11 @@ pub fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t { +pub fn vreinterpret_u64_s8(a: int8x8_t) -> uint64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -38320,15 +45775,12 @@ pub fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t { - let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: poly8x8_t = 
transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpret_u64_s8(a: int8x8_t) -> uint64x1_t { + let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -38346,11 +45798,11 @@ pub fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_f32(a: float32x2_t) -> poly16x4_t { +pub fn vreinterpret_p8_s8(a: int8x8_t) -> poly8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -38368,15 +45820,15 @@ pub fn vreinterpret_p16_f32(a: float32x2_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_f32(a: float32x2_t) -> poly16x4_t { - let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpret_p8_s8(a: int8x8_t) -> poly8x8_t { + let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { - let ret_val: poly16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + let ret_val: poly8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -38394,11 +45846,11 @@ pub fn vreinterpret_p16_f32(a: float32x2_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_f32(a: float32x4_t) -> p128 { +pub fn vreinterpret_p16_s8(a: int8x8_t) -> poly16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -38416,12 +45868,15 @@ pub fn vreinterpretq_p128_f32(a: float32x4_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_f32(a: float32x4_t) -> p128 { - let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { transmute(a) } +pub fn vreinterpret_p16_s8(a: int8x8_t) -> poly16x4_t { + let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; + unsafe { + let ret_val: poly16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -38439,11 +45894,11 @@ pub fn vreinterpretq_p128_f32(a: 
float32x4_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t { +pub fn vreinterpretq_f32_s8(a: int8x16_t) -> float32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -38461,19 +45916,16 @@ pub fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t { - let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_f32_s8(a: int8x16_t) -> float32x4_t { + let a: int8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) + let ret_val: float32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -38491,11 +45943,11 @@ pub fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t { +pub fn vreinterpretq_s16_s8(a: 
int8x16_t) -> int16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -38513,15 +45965,16 @@ pub fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t { - let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_s16_s8(a: int8x16_t) -> int16x8_t { + let a: int8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: int16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -38539,11 +45992,11 @@ pub fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_f32(a: float32x4_t) -> int32x4_t { +pub fn vreinterpretq_s32_s8(a: int8x16_t) -> int32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s8)"] 
#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -38561,15 +46014,16 @@ pub fn vreinterpretq_s32_f32(a: float32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_f32(a: float32x4_t) -> int32x4_t { - let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_s32_s8(a: int8x16_t) -> int32x4_t { + let a: int8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: int32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -38587,11 +46041,11 @@ pub fn vreinterpretq_s32_f32(a: float32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_f32(a: float32x4_t) -> int64x2_t { +pub fn vreinterpretq_s64_s8(a: int8x16_t) -> int64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -38609,15 +46063,16 @@ pub fn vreinterpretq_s64_f32(a: float32x4_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_f32(a: float32x4_t) -> int64x2_t { - let a: float32x4_t = unsafe { 
simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_s64_s8(a: int8x16_t) -> int64x2_t { + let a: int8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: int64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -38635,11 +46090,11 @@ pub fn vreinterpretq_s64_f32(a: float32x4_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t { +pub fn vreinterpretq_u8_s8(a: int8x16_t) -> uint8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -38657,8 +46112,9 @@ pub fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t { - let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_u8_s8(a: int8x16_t) -> uint8x16_t { + let a: int8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: uint8x16_t = transmute(a); simd_shuffle!( @@ -38669,7 +46125,7 @@ pub fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t { } } #[doc = 
"Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -38687,11 +46143,11 @@ pub fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t { +pub fn vreinterpretq_u16_s8(a: int8x16_t) -> uint16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -38709,15 +46165,16 @@ pub fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t { - let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_u16_s8(a: int8x16_t) -> uint16x8_t { + let a: int8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: uint16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s8)"] #[inline] #[cfg(target_endian = "little")] 
#[target_feature(enable = "neon")] @@ -38735,11 +46192,11 @@ pub fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_f32(a: float32x4_t) -> uint32x4_t { +pub fn vreinterpretq_u32_s8(a: int8x16_t) -> uint32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -38757,15 +46214,16 @@ pub fn vreinterpretq_u32_f32(a: float32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_f32(a: float32x4_t) -> uint32x4_t { - let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_u32_s8(a: int8x16_t) -> uint32x4_t { + let a: int8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: uint32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -38783,11 +46241,11 @@ pub fn vreinterpretq_u32_f32(a: float32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t { +pub fn vreinterpretq_u64_s8(a: int8x16_t) -> uint64x2_t { unsafe { 
transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -38805,15 +46263,16 @@ pub fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t { - let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_u64_s8(a: int8x16_t) -> uint64x2_t { + let a: int8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: uint64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -38831,11 +46290,11 @@ pub fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t { +pub fn vreinterpretq_p8_s8(a: int8x16_t) -> poly8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s8)"] #[inline] #[cfg(target_endian = "big")] 
#[target_feature(enable = "neon")] @@ -38853,8 +46312,9 @@ pub fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t { - let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_p8_s8(a: int8x16_t) -> poly8x16_t { + let a: int8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: poly8x16_t = transmute(a); simd_shuffle!( @@ -38865,7 +46325,7 @@ pub fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t { } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -38883,11 +46343,11 @@ pub fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t { +pub fn vreinterpretq_p16_s8(a: int8x16_t) -> poly16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -38905,15 +46365,16 @@ pub fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t { - let a: float32x4_t = unsafe 
{ simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_p16_s8(a: int8x16_t) -> poly16x8_t { + let a: int8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: poly16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -38931,11 +46392,11 @@ pub fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t { +pub fn vreinterpret_f32_s16(a: int16x4_t) -> float32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -38953,15 +46414,15 @@ pub fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t { - let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_f32_s16(a: int16x4_t) -> float32x2_t { + let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { let ret_val: float32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -38979,11 +46440,11 @@ pub fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_s8(a: int8x8_t) -> int16x4_t { +pub fn vreinterpret_s8_s16(a: int16x4_t) -> int8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -39001,15 +46462,15 @@ pub fn vreinterpret_s16_s8(a: int8x8_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_s8(a: int8x8_t) -> int16x4_t { - let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_s8_s16(a: int16x4_t) -> int8x8_t { + let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + let ret_val: int8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ 
-39027,11 +46488,11 @@ pub fn vreinterpret_s16_s8(a: int8x8_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_s8(a: int8x8_t) -> int32x2_t { +pub fn vreinterpret_s32_s16(a: int16x4_t) -> int32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -39049,15 +46510,15 @@ pub fn vreinterpret_s32_s8(a: int8x8_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_s8(a: int8x8_t) -> int32x2_t { - let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_s32_s16(a: int16x4_t) -> int32x2_t { + let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { let ret_val: int32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -39075,11 +46536,11 @@ pub fn vreinterpret_s32_s8(a: int8x8_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_s8(a: int8x8_t) -> int64x1_t { +pub fn vreinterpret_s64_s16(a: int16x4_t) -> int64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -39097,12 +46558,12 @@ pub fn vreinterpret_s64_s8(a: int8x8_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_s8(a: int8x8_t) -> int64x1_t { - let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_s64_s16(a: int16x4_t) -> int64x1_t { + let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -39120,11 +46581,11 @@ pub fn vreinterpret_s64_s8(a: int8x8_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_s8(a: int8x8_t) -> uint8x8_t { +pub fn vreinterpret_u8_s16(a: int16x4_t) -> uint8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -39142,15 +46603,15 @@ pub fn vreinterpret_u8_s8(a: int8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn 
vreinterpret_u8_s8(a: int8x8_t) -> uint8x8_t { - let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_u8_s16(a: int16x4_t) -> uint8x8_t { + let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { let ret_val: uint8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -39168,11 +46629,11 @@ pub fn vreinterpret_u8_s8(a: int8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_s8(a: int8x8_t) -> uint16x4_t { +pub fn vreinterpret_u16_s16(a: int16x4_t) -> uint16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -39190,15 +46651,15 @@ pub fn vreinterpret_u16_s8(a: int8x8_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_s8(a: int8x8_t) -> uint16x4_t { - let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_u16_s16(a: int16x4_t) -> uint16x4_t { + let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { let ret_val: uint16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -39216,11 +46677,11 @@ pub fn vreinterpret_u16_s8(a: int8x8_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_s8(a: int8x8_t) -> uint32x2_t { +pub fn vreinterpret_u32_s16(a: int16x4_t) -> uint32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -39238,15 +46699,15 @@ pub fn vreinterpret_u32_s8(a: int8x8_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_s8(a: int8x8_t) -> uint32x2_t { - let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_u32_s16(a: int16x4_t) -> uint32x2_t { + let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { let ret_val: uint32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -39264,11 +46725,11 @@ pub fn vreinterpret_u32_s8(a: int8x8_t) -> uint32x2_t { target_arch = "arm", 
unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u64_s8(a: int8x8_t) -> uint64x1_t { +pub fn vreinterpret_u64_s16(a: int16x4_t) -> uint64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -39286,12 +46747,12 @@ pub fn vreinterpret_u64_s8(a: int8x8_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u64_s8(a: int8x8_t) -> uint64x1_t { - let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_u64_s16(a: int16x4_t) -> uint64x1_t { + let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -39309,11 +46770,11 @@ pub fn vreinterpret_u64_s8(a: int8x8_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_s8(a: int8x8_t) -> poly8x8_t { +pub fn vreinterpret_p8_s16(a: int16x4_t) -> poly8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s8)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -39331,15 +46792,15 @@ pub fn vreinterpret_p8_s8(a: int8x8_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_s8(a: int8x8_t) -> poly8x8_t { - let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_p8_s16(a: int16x4_t) -> poly8x8_t { + let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { let ret_val: poly8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -39357,11 +46818,11 @@ pub fn vreinterpret_p8_s8(a: int8x8_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_s8(a: int8x8_t) -> poly16x4_t { +pub fn vreinterpret_p16_s16(a: int16x4_t) -> poly16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -39379,15 +46840,15 @@ pub fn vreinterpret_p16_s8(a: int8x8_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_s8(a: int8x8_t) -> poly16x4_t 
{ - let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_p16_s16(a: int16x4_t) -> poly16x4_t { + let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { let ret_val: poly16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -39405,11 +46866,11 @@ pub fn vreinterpret_p16_s8(a: int8x8_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_s8(a: int8x16_t) -> float32x4_t { +pub fn vreinterpretq_f32_s16(a: int16x8_t) -> float32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -39427,16 +46888,15 @@ pub fn vreinterpretq_f32_s8(a: int8x16_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_s8(a: int8x16_t) -> float32x4_t { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_f32_s16(a: int16x8_t) -> float32x4_t { + let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: float32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] 
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -39454,11 +46914,11 @@ pub fn vreinterpretq_f32_s8(a: int8x16_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_s8(a: int8x16_t) -> int16x8_t { +pub fn vreinterpretq_s8_s16(a: int16x8_t) -> int8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -39476,16 +46936,19 @@ pub fn vreinterpretq_s16_s8(a: int8x16_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_s8(a: int8x16_t) -> int16x8_t { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_s8_s16(a: int16x8_t) -> int8x16_t { + let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { - let ret_val: int16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + let ret_val: int8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s8)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -39503,11 +46966,11 @@ pub fn vreinterpretq_s16_s8(a: int8x16_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_s8(a: int8x16_t) -> int32x4_t { +pub fn vreinterpretq_s32_s16(a: int16x8_t) -> int32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -39525,16 +46988,15 @@ pub fn vreinterpretq_s32_s8(a: int8x16_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_s8(a: int8x16_t) -> int32x4_t { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_s32_s16(a: int16x8_t) -> int32x4_t { + let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: int32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -39552,11 +47014,11 @@ pub fn vreinterpretq_s32_s8(a: int8x16_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] 
-pub fn vreinterpretq_s64_s8(a: int8x16_t) -> int64x2_t { +pub fn vreinterpretq_s64_s16(a: int16x8_t) -> int64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -39574,16 +47036,15 @@ pub fn vreinterpretq_s64_s8(a: int8x16_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_s8(a: int8x16_t) -> int64x2_t { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_s64_s16(a: int16x8_t) -> int64x2_t { + let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: int64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -39601,11 +47062,11 @@ pub fn vreinterpretq_s64_s8(a: int8x16_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_s8(a: int8x16_t) -> uint8x16_t { +pub fn vreinterpretq_u8_s16(a: int16x8_t) -> uint8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s8)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -39623,9 +47084,8 @@ pub fn vreinterpretq_u8_s8(a: int8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_s8(a: int8x16_t) -> uint8x16_t { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_u8_s16(a: int16x8_t) -> uint8x16_t { + let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: uint8x16_t = transmute(a); simd_shuffle!( @@ -39636,7 +47096,7 @@ pub fn vreinterpretq_u8_s8(a: int8x16_t) -> uint8x16_t { } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -39654,11 +47114,11 @@ pub fn vreinterpretq_u8_s8(a: int8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_s8(a: int8x16_t) -> uint16x8_t { +pub fn vreinterpretq_u16_s16(a: int16x8_t) -> uint16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -39676,16 +47136,15 @@ pub fn vreinterpretq_u16_s8(a: int8x16_t) -> uint16x8_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_s8(a: int8x16_t) -> uint16x8_t { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_u16_s16(a: int16x8_t) -> uint16x8_t { + let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: uint16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -39703,11 +47162,11 @@ pub fn vreinterpretq_u16_s8(a: int8x16_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_s8(a: int8x16_t) -> uint32x4_t { +pub fn vreinterpretq_u32_s16(a: int16x8_t) -> uint32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -39725,16 +47184,15 @@ pub fn vreinterpretq_u32_s8(a: int8x16_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_s8(a: int8x16_t) -> uint32x4_t { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_u32_s16(a: int16x8_t) -> uint32x4_t { + let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 
4, 3, 2, 1, 0]) }; unsafe { let ret_val: uint32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -39752,11 +47210,11 @@ pub fn vreinterpretq_u32_s8(a: int8x16_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_s8(a: int8x16_t) -> uint64x2_t { +pub fn vreinterpretq_u64_s16(a: int16x8_t) -> uint64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -39774,16 +47232,15 @@ pub fn vreinterpretq_u64_s8(a: int8x16_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_s8(a: int8x16_t) -> uint64x2_t { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_u64_s16(a: int16x8_t) -> uint64x2_t { + let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: uint64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s8)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -39801,11 +47258,11 @@ pub fn vreinterpretq_u64_s8(a: int8x16_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_s8(a: int8x16_t) -> poly8x16_t { +pub fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -39823,9 +47280,8 @@ pub fn vreinterpretq_p8_s8(a: int8x16_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_s8(a: int8x16_t) -> poly8x16_t { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t { + let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: poly8x16_t = transmute(a); simd_shuffle!( @@ -39836,7 +47292,7 @@ pub fn vreinterpretq_p8_s8(a: int8x16_t) -> poly8x16_t { } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -39854,11 +47310,11 @@ pub fn vreinterpretq_p8_s8(a: int8x16_t) -> poly8x16_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_s8(a: int8x16_t) -> poly16x8_t { +pub fn vreinterpretq_p16_s16(a: int16x8_t) -> poly16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -39876,16 +47332,15 @@ pub fn vreinterpretq_p16_s8(a: int8x16_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_s8(a: int8x16_t) -> poly16x8_t { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_p16_s16(a: int16x8_t) -> poly16x8_t { + let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: poly16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -39903,11 +47358,11 @@ pub fn vreinterpretq_p16_s8(a: int8x16_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_s16(a: int16x4_t) -> float32x2_t { +pub fn vreinterpret_f32_s32(a: int32x2_t) -> float32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -39925,15 +47380,15 @@ pub fn vreinterpret_f32_s16(a: int16x4_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_s16(a: int16x4_t) -> float32x2_t { - let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpret_f32_s32(a: int32x2_t) -> float32x2_t { + let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { let ret_val: float32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -39951,11 +47406,11 @@ pub fn vreinterpret_f32_s16(a: int16x4_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_s16(a: int16x4_t) -> int8x8_t { +pub fn vreinterpret_s8_s32(a: int32x2_t) -> int8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -39973,15 +47428,15 @@ pub fn vreinterpret_s8_s16(a: int16x4_t) -> int8x8_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_s16(a: int16x4_t) -> int8x8_t { - let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpret_s8_s32(a: int32x2_t) -> int8x8_t { + let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { let ret_val: int8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -39999,11 +47454,11 @@ pub fn vreinterpret_s8_s16(a: int16x4_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_s16(a: int16x4_t) -> int32x2_t { +pub fn vreinterpret_s16_s32(a: int32x2_t) -> int16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -40021,15 +47476,15 @@ pub fn vreinterpret_s32_s16(a: int16x4_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_s16(a: int16x4_t) -> int32x2_t { - let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpret_s16_s32(a: int32x2_t) -> int16x4_t { + let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { - let ret_val: int32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) + let ret_val: int16x4_t = 
transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -40047,11 +47502,11 @@ pub fn vreinterpret_s32_s16(a: int16x4_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_s16(a: int16x4_t) -> int64x1_t { +pub fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -40069,12 +47524,12 @@ pub fn vreinterpret_s64_s16(a: int16x4_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_s16(a: int16x4_t) -> int64x1_t { - let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t { + let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -40092,11 +47547,11 @@ pub fn vreinterpret_s64_s16(a: 
int16x4_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_s16(a: int16x4_t) -> uint8x8_t { +pub fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -40114,15 +47569,15 @@ pub fn vreinterpret_u8_s16(a: int16x4_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_s16(a: int16x4_t) -> uint8x8_t { - let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t { + let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { let ret_val: uint8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -40140,11 +47595,11 @@ pub fn vreinterpret_u8_s16(a: int16x4_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_s16(a: int16x4_t) -> uint16x4_t { +pub fn vreinterpret_u16_s32(a: int32x2_t) -> uint16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -40162,15 +47617,15 @@ pub fn vreinterpret_u16_s16(a: int16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_s16(a: int16x4_t) -> uint16x4_t { - let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpret_u16_s32(a: int32x2_t) -> uint16x4_t { + let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { let ret_val: uint16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -40188,11 +47643,11 @@ pub fn vreinterpret_u16_s16(a: int16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_s16(a: int16x4_t) -> uint32x2_t { +pub fn vreinterpret_u32_s32(a: int32x2_t) -> uint32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -40210,15 +47665,15 @@ pub fn vreinterpret_u32_s16(a: int16x4_t) -> uint32x2_t { target_arch = "arm", 
unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_s16(a: int16x4_t) -> uint32x2_t { - let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpret_u32_s32(a: int32x2_t) -> uint32x2_t { + let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { let ret_val: uint32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -40236,11 +47691,11 @@ pub fn vreinterpret_u32_s16(a: int16x4_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u64_s16(a: int16x4_t) -> uint64x1_t { +pub fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -40258,12 +47713,12 @@ pub fn vreinterpret_u64_s16(a: int16x4_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u64_s16(a: int16x4_t) -> uint64x1_t { - let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t { + let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -40281,11 +47736,11 @@ pub fn vreinterpret_u64_s16(a: int16x4_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_s16(a: int16x4_t) -> poly8x8_t { +pub fn vreinterpret_p8_s32(a: int32x2_t) -> poly8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -40303,15 +47758,15 @@ pub fn vreinterpret_p8_s16(a: int16x4_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_s16(a: int16x4_t) -> poly8x8_t { - let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpret_p8_s32(a: int32x2_t) -> poly8x8_t { + let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { let ret_val: poly8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -40329,11 +47784,11 @@ pub fn vreinterpret_p8_s16(a: int16x4_t) -> poly8x8_t { target_arch = "arm", 
unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_s16(a: int16x4_t) -> poly16x4_t { +pub fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -40351,15 +47806,15 @@ pub fn vreinterpret_p16_s16(a: int16x4_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_s16(a: int16x4_t) -> poly16x4_t { - let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t { + let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { let ret_val: poly16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -40377,11 +47832,11 @@ pub fn vreinterpret_p16_s16(a: int16x4_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_s16(a: int16x8_t) -> float32x4_t { +pub fn vreinterpretq_f32_s32(a: int32x4_t) -> float32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s16)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -40399,15 +47854,15 @@ pub fn vreinterpretq_f32_s16(a: int16x8_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_s16(a: int16x8_t) -> float32x4_t { - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_f32_s32(a: int32x4_t) -> float32x4_t { + let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { let ret_val: float32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -40425,11 +47880,11 @@ pub fn vreinterpretq_f32_s16(a: int16x8_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_s16(a: int16x8_t) -> int8x16_t { +pub fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -40447,8 +47902,8 @@ pub fn vreinterpretq_s8_s16(a: int16x8_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_s16(a: 
int16x8_t) -> int8x16_t { - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t { + let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { let ret_val: int8x16_t = transmute(a); simd_shuffle!( @@ -40459,7 +47914,7 @@ pub fn vreinterpretq_s8_s16(a: int16x8_t) -> int8x16_t { } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -40477,11 +47932,11 @@ pub fn vreinterpretq_s8_s16(a: int16x8_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_s16(a: int16x8_t) -> int32x4_t { +pub fn vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -40499,15 +47954,15 @@ pub fn vreinterpretq_s32_s16(a: int16x8_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_s16(a: int16x8_t) -> int32x4_t { - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t { + let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { - let ret_val: int32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + let ret_val: 
int16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -40525,11 +47980,11 @@ pub fn vreinterpretq_s32_s16(a: int16x8_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_s16(a: int16x8_t) -> int64x2_t { +pub fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -40547,15 +48002,15 @@ pub fn vreinterpretq_s64_s16(a: int16x8_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_s16(a: int16x8_t) -> int64x2_t { - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t { + let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { let ret_val: int64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s32)"] #[inline] 
#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -40573,11 +48028,11 @@ pub fn vreinterpretq_s64_s16(a: int16x8_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_s16(a: int16x8_t) -> uint8x16_t { +pub fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -40595,8 +48050,8 @@ pub fn vreinterpretq_u8_s16(a: int16x8_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_s16(a: int16x8_t) -> uint8x16_t { - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t { + let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { let ret_val: uint8x16_t = transmute(a); simd_shuffle!( @@ -40607,7 +48062,7 @@ pub fn vreinterpretq_u8_s16(a: int16x8_t) -> uint8x16_t { } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -40625,11 +48080,11 @@ pub fn vreinterpretq_u8_s16(a: int16x8_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_s16(a: int16x8_t) -> uint16x8_t { +pub fn vreinterpretq_u16_s32(a: int32x4_t) -> 
uint16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -40647,15 +48102,15 @@ pub fn vreinterpretq_u16_s16(a: int16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_s16(a: int16x8_t) -> uint16x8_t { - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_u16_s32(a: int32x4_t) -> uint16x8_t { + let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { let ret_val: uint16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -40673,11 +48128,11 @@ pub fn vreinterpretq_u16_s16(a: int16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_s16(a: int16x8_t) -> uint32x4_t { +pub fn vreinterpretq_u32_s32(a: int32x4_t) -> uint32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s32)"] #[inline] #[cfg(target_endian = "big")] 
#[target_feature(enable = "neon")] @@ -40695,15 +48150,15 @@ pub fn vreinterpretq_u32_s16(a: int16x8_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_s16(a: int16x8_t) -> uint32x4_t { - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_u32_s32(a: int32x4_t) -> uint32x4_t { + let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { let ret_val: uint32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -40721,11 +48176,11 @@ pub fn vreinterpretq_u32_s16(a: int16x8_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_s16(a: int16x8_t) -> uint64x2_t { +pub fn vreinterpretq_u64_s32(a: int32x4_t) -> uint64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -40743,15 +48198,15 @@ pub fn vreinterpretq_u64_s16(a: int16x8_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_s16(a: int16x8_t) -> uint64x2_t { - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_u64_s32(a: 
int32x4_t) -> uint64x2_t { + let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { let ret_val: uint64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -40769,11 +48224,11 @@ pub fn vreinterpretq_u64_s16(a: int16x8_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t { +pub fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -40791,8 +48246,8 @@ pub fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t { - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t { + let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { let ret_val: poly8x16_t = transmute(a); simd_shuffle!( @@ -40803,7 +48258,7 @@ pub fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t { } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -40821,11 +48276,11 @@ pub fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_s16(a: int16x8_t) -> poly16x8_t { +pub fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -40843,15 +48298,15 @@ pub fn vreinterpretq_p16_s16(a: int16x8_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_s16(a: int16x8_t) -> poly16x8_t { - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t { + let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { let ret_val: poly16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -40869,11 +48324,11 @@ pub fn vreinterpretq_p16_s16(a: int16x8_t) -> 
poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_s32(a: int32x2_t) -> float32x2_t { +pub fn vreinterpret_f32_s64(a: int64x1_t) -> float32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -40891,15 +48346,14 @@ pub fn vreinterpret_f32_s32(a: int32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_s32(a: int32x2_t) -> float32x2_t { - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpret_f32_s64(a: int64x1_t) -> float32x2_t { unsafe { let ret_val: float32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -40917,11 +48371,11 @@ pub fn vreinterpret_f32_s32(a: int32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_s32(a: int32x2_t) -> int8x8_t { +pub fn vreinterpret_s8_s64(a: int64x1_t) -> int8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s32)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -40939,15 +48393,14 @@ pub fn vreinterpret_s8_s32(a: int32x2_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_s32(a: int32x2_t) -> int8x8_t { - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpret_s8_s64(a: int64x1_t) -> int8x8_t { unsafe { let ret_val: int8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -40965,11 +48418,11 @@ pub fn vreinterpret_s8_s32(a: int32x2_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_s32(a: int32x2_t) -> int16x4_t { +pub fn vreinterpret_s16_s64(a: int64x1_t) -> int16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -40987,15 +48440,14 @@ pub fn vreinterpret_s16_s32(a: int32x2_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_s32(a: int32x2_t) -> int16x4_t { - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn 
vreinterpret_s16_s64(a: int64x1_t) -> int16x4_t { unsafe { let ret_val: int16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -41013,11 +48465,11 @@ pub fn vreinterpret_s16_s32(a: int32x2_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t { +pub fn vreinterpret_s32_s64(a: int64x1_t) -> int32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -41035,12 +48487,14 @@ pub fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t { - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { transmute(a) } +pub fn vreinterpret_s32_s64(a: int64x1_t) -> int32x2_t { + unsafe { + let ret_val: int32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s64)"] #[inline] 
#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -41058,11 +48512,11 @@ pub fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t { +pub fn vreinterpret_u8_s64(a: int64x1_t) -> uint8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -41080,15 +48534,14 @@ pub fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t { - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpret_u8_s64(a: int64x1_t) -> uint8x8_t { unsafe { let ret_val: uint8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -41106,11 +48559,11 @@ pub fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_s32(a: int32x2_t) -> uint16x4_t { +pub fn vreinterpret_u16_s64(a: int64x1_t) -> uint16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -41128,39 +48581,16 @@ pub fn vreinterpret_u16_s32(a: int32x2_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_s32(a: int32x2_t) -> uint16x4_t { - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpret_u16_s64(a: int64x1_t) -> uint16x4_t { unsafe { - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u32_s32(a: int32x2_t) -> uint32x2_t { - unsafe { transmute(a) } + let ret_val: uint16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s64)"] #[inline] -#[cfg(target_endian = "big")] +#[cfg(target_endian 
= "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -41176,17 +48606,13 @@ pub fn vreinterpret_u32_s32(a: int32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_s32(a: int32x2_t) -> uint32x2_t { - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpret_u32_s64(a: int64x1_t) -> uint32x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s64)"] #[inline] -#[cfg(target_endian = "little")] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -41202,13 +48628,15 @@ pub fn vreinterpret_u32_s32(a: int32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t { - unsafe { transmute(a) } +pub fn vreinterpret_u32_s64(a: int64x1_t) -> uint32x2_t { + unsafe { + let ret_val: uint32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch 
= "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -41224,12 +48652,11 @@ pub fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t { - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpret_u64_s64(a: int64x1_t) -> uint64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -41247,11 +48674,11 @@ pub fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_s32(a: int32x2_t) -> poly8x8_t { +pub fn vreinterpret_p8_s64(a: int64x1_t) -> poly8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -41269,15 +48696,14 @@ pub fn vreinterpret_p8_s32(a: int32x2_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_s32(a: int32x2_t) -> poly8x8_t { - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpret_p8_s64(a: int64x1_t) -> poly8x8_t { unsafe { let ret_val: poly8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } 
#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -41295,11 +48721,11 @@ pub fn vreinterpret_p8_s32(a: int32x2_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t { +pub fn vreinterpret_p16_s64(a: int64x1_t) -> poly16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -41317,15 +48743,14 @@ pub fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t { - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpret_p16_s64(a: int64x1_t) -> poly16x4_t { unsafe { let ret_val: poly16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -41343,11 +48768,11 @@ pub fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t { target_arch = "arm", 
unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_s32(a: int32x4_t) -> float32x4_t { +pub fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -41365,15 +48790,15 @@ pub fn vreinterpretq_f32_s32(a: int32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_s32(a: int32x4_t) -> float32x4_t { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t { + let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { let ret_val: float32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -41391,11 +48816,11 @@ pub fn vreinterpretq_f32_s32(a: int32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t { +pub fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s32)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -41413,8 +48838,8 @@ pub fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t { + let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { let ret_val: int8x16_t = transmute(a); simd_shuffle!( @@ -41425,7 +48850,7 @@ pub fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t { } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -41443,11 +48868,11 @@ pub fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t { +pub fn vreinterpretq_s16_s64(a: int64x2_t) -> int16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -41465,15 +48890,15 @@ pub fn vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn 
vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_s16_s64(a: int64x2_t) -> int16x8_t { + let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { let ret_val: int16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -41491,11 +48916,11 @@ pub fn vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t { +pub fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -41513,15 +48938,15 @@ pub fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t { + let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { - let ret_val: int64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) + let ret_val: int32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, 
[3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -41539,11 +48964,11 @@ pub fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t { +pub fn vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -41561,8 +48986,8 @@ pub fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t { + let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { let ret_val: uint8x16_t = transmute(a); simd_shuffle!( @@ -41573,7 +48998,7 @@ pub fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t { } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s64)"] #[inline] #[cfg(target_endian = "little")] 
#[target_feature(enable = "neon")] @@ -41591,11 +49016,11 @@ pub fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_s32(a: int32x4_t) -> uint16x8_t { +pub fn vreinterpretq_u16_s64(a: int64x2_t) -> uint16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -41613,15 +49038,15 @@ pub fn vreinterpretq_u16_s32(a: int32x4_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_s32(a: int32x4_t) -> uint16x8_t { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_u16_s64(a: int64x2_t) -> uint16x8_t { + let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { let ret_val: uint16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -41639,11 +49064,11 @@ pub fn vreinterpretq_u16_s32(a: int32x4_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_s32(a: int32x4_t) -> uint32x4_t { +pub fn vreinterpretq_u32_s64(a: int64x2_t) -> uint32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast 
operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -41661,15 +49086,15 @@ pub fn vreinterpretq_u32_s32(a: int32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_s32(a: int32x4_t) -> uint32x4_t { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_u32_s64(a: int64x2_t) -> uint32x4_t { + let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { let ret_val: uint32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -41687,11 +49112,11 @@ pub fn vreinterpretq_u32_s32(a: int32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_s32(a: int32x4_t) -> uint64x2_t { +pub fn vreinterpretq_u64_s64(a: int64x2_t) -> uint64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -41709,15 +49134,15 @@ pub fn vreinterpretq_u64_s32(a: int32x4_t) -> 
uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_s32(a: int32x4_t) -> uint64x2_t { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_u64_s64(a: int64x2_t) -> uint64x2_t { + let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { let ret_val: uint64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -41735,11 +49160,11 @@ pub fn vreinterpretq_u64_s32(a: int32x4_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t { +pub fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -41757,8 +49182,8 @@ pub fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t { + let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { let ret_val: poly8x16_t = transmute(a); simd_shuffle!( @@ 
-41769,7 +49194,7 @@ pub fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t { } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -41787,11 +49212,11 @@ pub fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t { +pub fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -41809,15 +49234,15 @@ pub fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t { + let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { let ret_val: poly16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u8)"] #[inline] 
#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -41835,11 +49260,11 @@ pub fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_s64(a: int64x1_t) -> float32x2_t { +pub fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -41857,14 +49282,15 @@ pub fn vreinterpret_f32_s64(a: int64x1_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_s64(a: int64x1_t) -> float32x2_t { +pub fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t { + let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: float32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -41882,11 +49308,11 @@ pub fn vreinterpret_f32_s64(a: int64x1_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_s64(a: int64x1_t) -> int8x8_t { +pub fn vreinterpret_s8_u8(a: uint8x8_t) -> int8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -41904,14 +49330,15 @@ pub fn vreinterpret_s8_s64(a: int64x1_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_s64(a: int64x1_t) -> int8x8_t { +pub fn vreinterpret_s8_u8(a: uint8x8_t) -> int8x8_t { + let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: int8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -41929,11 +49356,11 @@ pub fn vreinterpret_s8_s64(a: int64x1_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_s64(a: int64x1_t) -> int16x4_t { +pub fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -41951,14 +49378,15 @@ pub fn vreinterpret_s16_s64(a: int64x1_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") 
)] -pub fn vreinterpret_s16_s64(a: int64x1_t) -> int16x4_t { +pub fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t { + let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: int16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -41976,11 +49404,11 @@ pub fn vreinterpret_s16_s64(a: int64x1_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_s64(a: int64x1_t) -> int32x2_t { +pub fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -41998,14 +49426,15 @@ pub fn vreinterpret_s32_s64(a: int64x1_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_s64(a: int64x1_t) -> int32x2_t { +pub fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t { + let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: int32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s64)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -42023,11 +49452,11 @@ pub fn vreinterpret_s32_s64(a: int64x1_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_s64(a: int64x1_t) -> uint8x8_t { +pub fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -42045,14 +49474,12 @@ pub fn vreinterpret_u8_s64(a: int64x1_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_s64(a: int64x1_t) -> uint8x8_t { - unsafe { - let ret_val: uint8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t { + let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -42070,11 +49497,11 @@ pub fn vreinterpret_u8_s64(a: int64x1_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_s64(a: int64x1_t) -> uint16x4_t { +pub fn 
vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -42092,14 +49519,15 @@ pub fn vreinterpret_u16_s64(a: int64x1_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_s64(a: int64x1_t) -> uint16x4_t { +pub fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t { + let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: uint16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -42117,11 +49545,11 @@ pub fn vreinterpret_u16_s64(a: int64x1_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_s64(a: int64x1_t) -> uint32x2_t { +pub fn vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -42139,15 +49567,17 @@ pub 
fn vreinterpret_u32_s64(a: int64x1_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_s64(a: int64x1_t) -> uint32x2_t { +pub fn vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t { + let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: uint32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -42163,11 +49593,34 @@ pub fn vreinterpret_u32_s64(a: int64x1_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u64_s64(a: int64x1_t) -> uint64x1_t { +pub fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + 
unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t { + let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; + unsafe { transmute(a) } +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -42185,11 +49638,11 @@ pub fn vreinterpret_u64_s64(a: int64x1_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_s64(a: int64x1_t) -> poly8x8_t { +pub fn vreinterpret_p8_u8(a: uint8x8_t) -> poly8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -42207,14 +49660,15 @@ pub fn vreinterpret_p8_s64(a: int64x1_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_s64(a: int64x1_t) -> poly8x8_t { +pub fn vreinterpret_p8_u8(a: uint8x8_t) -> poly8x8_t { + let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: poly8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable 
= "neon")] @@ -42232,11 +49686,11 @@ pub fn vreinterpret_p8_s64(a: int64x1_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_s64(a: int64x1_t) -> poly16x4_t { +pub fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -42254,14 +49708,15 @@ pub fn vreinterpret_p16_s64(a: int64x1_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_s64(a: int64x1_t) -> poly16x4_t { +pub fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t { + let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: poly16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -42279,11 +49734,11 @@ pub fn vreinterpret_p16_s64(a: int64x1_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t { +pub fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -42301,15 +49756,16 @@ pub fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t { + let a: uint8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: float32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -42327,11 +49783,11 @@ pub fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t { +pub fn vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -42349,8 +49805,9 @@ pub fn vreinterpretq_s8_s64(a: 
int64x2_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t { + let a: uint8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: int8x16_t = transmute(a); simd_shuffle!( @@ -42361,7 +49818,7 @@ pub fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t { } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -42379,11 +49836,11 @@ pub fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_s64(a: int64x2_t) -> int16x8_t { +pub fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -42401,15 +49858,16 @@ pub fn vreinterpretq_s16_s64(a: int64x2_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_s64(a: int64x2_t) -> int16x8_t { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t { + let a: uint8x16_t = + 
unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: int16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -42427,11 +49885,11 @@ pub fn vreinterpretq_s16_s64(a: int64x2_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t { +pub fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -42449,15 +49907,16 @@ pub fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t { + let a: uint8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: int32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s64)"] +#[doc 
= "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -42475,11 +49934,11 @@ pub fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t { +pub fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -42497,19 +49956,16 @@ pub fn vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t { + let a: uint8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) + let ret_val: int64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -42527,11 +49983,11 @@ pub fn 
vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_s64(a: int64x2_t) -> uint16x8_t { +pub fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -42549,15 +50005,16 @@ pub fn vreinterpretq_u16_s64(a: int64x2_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_s64(a: int64x2_t) -> uint16x8_t { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t { + let a: uint8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: uint16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -42575,11 +50032,11 @@ pub fn vreinterpretq_u16_s64(a: int64x2_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_s64(a: int64x2_t) -> uint32x4_t { +pub fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -42597,15 +50054,16 @@ pub fn vreinterpretq_u32_s64(a: int64x2_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_s64(a: int64x2_t) -> uint32x4_t { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t { + let a: uint8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: uint32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -42623,11 +50081,11 @@ pub fn vreinterpretq_u32_s64(a: int64x2_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_s64(a: int64x2_t) -> uint64x2_t { +pub fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -42645,15 +50103,16 @@ pub fn vreinterpretq_u64_s64(a: 
int64x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_s64(a: int64x2_t) -> uint64x2_t { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t { + let a: uint8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: uint64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -42671,11 +50130,11 @@ pub fn vreinterpretq_u64_s64(a: int64x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t { +pub fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -42693,8 +50152,9 @@ pub fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t { + let a: uint8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 
7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: poly8x16_t = transmute(a); simd_shuffle!( @@ -42705,7 +50165,7 @@ pub fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t { } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -42723,11 +50183,11 @@ pub fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t { +pub fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -42745,15 +50205,16 @@ pub fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t { + let a: uint8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: poly16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u8)"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -42771,11 +50232,11 @@ pub fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t { +pub fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -42793,15 +50254,15 @@ pub fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t { + let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { let ret_val: float32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -42819,11 +50280,11 @@ pub fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_u8(a: 
uint8x8_t) -> int8x8_t { +pub fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -42841,15 +50302,15 @@ pub fn vreinterpret_s8_u8(a: uint8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_u8(a: uint8x8_t) -> int8x8_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { + let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { let ret_val: int8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -42867,11 +50328,11 @@ pub fn vreinterpret_s8_u8(a: uint8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t { +pub fn vreinterpret_s16_u16(a: uint16x4_t) -> int16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u16)"] #[inline] 
#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -42889,15 +50350,15 @@ pub fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_s16_u16(a: uint16x4_t) -> int16x4_t { + let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { let ret_val: int16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -42915,11 +50376,11 @@ pub fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t { +pub fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -42937,15 +50398,15 @@ pub fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_s32_u16(a: 
uint16x4_t) -> int32x2_t { + let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { let ret_val: int32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -42963,11 +50424,11 @@ pub fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t { +pub fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -42985,12 +50446,12 @@ pub fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t { + let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u16)"] 
#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -43008,11 +50469,11 @@ pub fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t { +pub fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -43030,15 +50491,15 @@ pub fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t { + let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + let ret_val: uint8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -43056,11 +50517,11 @@ pub fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_u8(a: uint8x8_t) -> 
uint32x2_t { +pub fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -43078,15 +50539,15 @@ pub fn vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t { + let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { let ret_val: uint32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -43104,11 +50565,11 @@ pub fn vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t { +pub fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u16)"] #[inline] 
#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -43126,12 +50587,12 @@ pub fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t { + let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -43149,11 +50610,11 @@ pub fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_u8(a: uint8x8_t) -> poly8x8_t { +pub fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -43171,15 +50632,15 @@ pub fn vreinterpret_p8_u8(a: uint8x8_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_u8(a: uint8x8_t) -> poly8x8_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t { + let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 
2, 1, 0]) }; unsafe { let ret_val: poly8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -43197,11 +50658,11 @@ pub fn vreinterpret_p8_u8(a: uint8x8_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t { +pub fn vreinterpret_p16_u16(a: uint16x4_t) -> poly16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -43219,15 +50680,15 @@ pub fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_p16_u16(a: uint16x4_t) -> poly16x4_t { + let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { let ret_val: poly16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u8)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -43245,11 +50706,11 @@ pub fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t { +pub fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -43267,16 +50728,15 @@ pub fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t { + let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: float32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -43294,11 +50754,11 @@ pub fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", 
issue = "111800") )] -pub fn vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t { +pub fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -43314,11 +50774,10 @@ pub fn vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t { )] #[cfg_attr( target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { + let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: int8x16_t = transmute(a); simd_shuffle!( @@ -43329,7 +50788,7 @@ pub fn vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t { } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -43347,11 +50806,11 @@ pub fn vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t { +pub fn vreinterpretq_s16_u16(a: uint16x8_t) -> int16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc 
= "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -43369,16 +50828,15 @@ pub fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_s16_u16(a: uint16x8_t) -> int16x8_t { + let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: int16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -43396,11 +50854,11 @@ pub fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t { +pub fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -43418,16 +50876,15 
@@ pub fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { + let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: int32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -43445,11 +50902,11 @@ pub fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t { +pub fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -43467,16 +50924,15 @@ pub fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn 
vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { + let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: int64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -43494,11 +50950,11 @@ pub fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t { +pub fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -43516,16 +50972,19 @@ pub fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { + let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { - let ret_val: uint16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + let ret_val: uint8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 
7, 6, 5, 4, 3, 2, 1, 0] + ) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -43543,11 +51002,11 @@ pub fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t { +pub fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -43565,16 +51024,15 @@ pub fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { + let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: uint32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u16)"] #[inline] 
#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -43592,11 +51050,11 @@ pub fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t { +pub fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -43614,16 +51072,15 @@ pub fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { + let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: uint64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -43641,11 +51098,11 @@ pub fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t { +pub fn vreinterpretq_p8_u16(a: uint16x8_t) -> 
poly8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -43663,9 +51120,8 @@ pub fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { + let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: poly8x16_t = transmute(a); simd_shuffle!( @@ -43676,7 +51132,7 @@ pub fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t { } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -43694,11 +51150,11 @@ pub fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t { +pub fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u8)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -43716,16 +51172,15 @@ pub fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t { + let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: poly16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -43743,11 +51198,11 @@ pub fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t { +pub fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -43765,15 +51220,15 @@ pub fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t { + let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { let ret_val: float32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -43791,11 +51246,11 @@ pub fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { +pub fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -43813,15 +51268,15 @@ pub fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { + let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { let ret_val: int8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector 
reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -43839,11 +51294,11 @@ pub fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_u16(a: uint16x4_t) -> int16x4_t { +pub fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -43861,15 +51316,15 @@ pub fn vreinterpret_s16_u16(a: uint16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_u16(a: uint16x4_t) -> int16x4_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { + let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { let ret_val: int16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -43887,11 +51342,11 @@ pub fn vreinterpret_s16_u16(a: 
uint16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t { +pub fn vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -43909,15 +51364,15 @@ pub fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t { + let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { let ret_val: int32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -43935,11 +51390,11 @@ pub fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t { +pub fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -43957,12 +51412,12 @@ pub fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { + let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -43980,11 +51435,11 @@ pub fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t { +pub fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -44002,15 +51457,15 @@ pub fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn 
vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { + let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { let ret_val: uint8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -44028,11 +51483,11 @@ pub fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t { +pub fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -44050,15 +51505,15 @@ pub fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { + let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) + let ret_val: uint16x4_t = transmute(a); + simd_shuffle!(ret_val, 
ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -44076,11 +51531,11 @@ pub fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t { +pub fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -44098,12 +51553,12 @@ pub fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { + let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -44121,11 +51576,11 @@ pub fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t { 
target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t { +pub fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -44143,15 +51598,15 @@ pub fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { + let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { let ret_val: poly8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -44169,11 +51624,11 @@ pub fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_u16(a: uint16x4_t) -> poly16x4_t { +pub fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u16)"] +#[doc 
= "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -44191,15 +51646,15 @@ pub fn vreinterpret_p16_u16(a: uint16x4_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_u16(a: uint16x4_t) -> poly16x4_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { + let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { let ret_val: poly16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -44217,11 +51672,11 @@ pub fn vreinterpret_p16_u16(a: uint16x4_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t { +pub fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -44239,15 +51694,15 @@ pub fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_u16(a: 
uint16x8_t) -> float32x4_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t { + let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { let ret_val: float32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -44265,11 +51720,11 @@ pub fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { +pub fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -44287,8 +51742,8 @@ pub fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { + let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { let ret_val: int8x16_t = transmute(a); simd_shuffle!( @@ -44299,7 +51754,7 @@ pub fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { } } #[doc = 
"Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -44317,11 +51772,11 @@ pub fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_u16(a: uint16x8_t) -> int16x8_t { +pub fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -44339,15 +51794,15 @@ pub fn vreinterpretq_s16_u16(a: uint16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_u16(a: uint16x8_t) -> int16x8_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t { + let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { let ret_val: int16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -44365,11 
+51820,11 @@ pub fn vreinterpretq_s16_u16(a: uint16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { +pub fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -44387,15 +51842,15 @@ pub fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t { + let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { let ret_val: int32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -44413,11 +51868,11 @@ pub fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { +pub fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -44435,15 +51890,15 @@ pub fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t { + let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { let ret_val: int64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -44461,11 +51916,11 @@ pub fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { +pub fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -44483,8 +51938,8 @@ pub fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { 
target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { + let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { let ret_val: uint8x16_t = transmute(a); simd_shuffle!( @@ -44495,7 +51950,7 @@ pub fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -44513,11 +51968,11 @@ pub fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { +pub fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -44535,15 +51990,15 @@ pub fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { + let a: uint32x4_t = unsafe { 
simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + let ret_val: uint16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -44561,11 +52016,11 @@ pub fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { +pub fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -44583,15 +52038,15 @@ pub fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { + let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { let ret_val: uint64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -44609,11 +52064,11 @@ pub fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { +pub fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -44631,8 +52086,8 @@ pub fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { + let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { let ret_val: poly8x16_t = transmute(a); simd_shuffle!( @@ -44643,7 +52098,7 @@ pub fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -44661,11 +52116,11 @@ pub 
fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t { +pub fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -44683,15 +52138,15 @@ pub fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t { + let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { let ret_val: poly16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -44709,11 +52164,11 @@ pub fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t { +pub fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -44731,15 +52186,14 @@ pub fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t { unsafe { let ret_val: float32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -44757,11 +52211,11 @@ pub fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { +pub fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -44779,15 +52233,14 @@ pub fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn 
vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t { unsafe { let ret_val: int8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -44805,11 +52258,11 @@ pub fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { +pub fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -44827,15 +52280,14 @@ pub fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t { unsafe { let ret_val: int16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u32)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -44853,11 +52305,11 @@ pub fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t { +pub fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -44875,39 +52327,15 @@ pub fn vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t { unsafe { let ret_val: int32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue 
= "111800") -)] -pub fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44923,12 +52351,11 @@ pub fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpret_s64_u64(a: uint64x1_t) -> int64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -44946,11 +52373,11 @@ pub fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { +pub fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u64)"] #[inline] #[cfg(target_endian = 
"big")] #[target_feature(enable = "neon")] @@ -44968,15 +52395,14 @@ pub fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t { unsafe { let ret_val: uint8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -44994,11 +52420,11 @@ pub fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { +pub fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -45016,15 +52442,14 @@ pub fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t { unsafe { let ret_val: uint16x4_t = transmute(a); simd_shuffle!(ret_val, 
ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -45042,11 +52467,11 @@ pub fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { +pub fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -45064,12 +52489,14 @@ pub fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { transmute(a) } +pub fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t { + unsafe { + let ret_val: uint32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -45087,11 +52514,11 @@ pub fn 
vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { +pub fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -45109,15 +52536,14 @@ pub fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t { unsafe { let ret_val: poly8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -45135,11 +52561,11 @@ pub fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { +pub fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u32)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -45157,15 +52583,14 @@ pub fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t { unsafe { let ret_val: poly16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -45183,11 +52608,11 @@ pub fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t { +pub fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -45205,15 +52630,15 @@ pub fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, 
a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t { + let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { let ret_val: float32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -45231,11 +52656,11 @@ pub fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { +pub fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -45253,8 +52678,8 @@ pub fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { + let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { let ret_val: int8x16_t = transmute(a); simd_shuffle!( @@ -45265,7 +52690,7 @@ pub fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -45283,11 +52708,11 @@ pub fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t { +pub fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -45305,15 +52730,15 @@ pub fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { + let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { let ret_val: int16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -45331,11 +52756,11 @@ pub fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t { 
target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t { +pub fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -45353,15 +52778,15 @@ pub fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { + let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { let ret_val: int32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -45379,11 +52804,11 @@ pub fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t { +pub fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u32)"] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -45401,15 +52826,15 @@ pub fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t { + let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { let ret_val: int64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -45427,11 +52852,11 @@ pub fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { +pub fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -45449,8 +52874,8 @@ pub fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_u32(a: uint32x4_t) 
-> uint8x16_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { + let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { let ret_val: uint8x16_t = transmute(a); simd_shuffle!( @@ -45461,7 +52886,7 @@ pub fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -45479,11 +52904,11 @@ pub fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { +pub fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -45501,15 +52926,15 @@ pub fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t { + let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { let ret_val: uint16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast 
operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -45527,11 +52952,11 @@ pub fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { +pub fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -45549,15 +52974,15 @@ pub fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { + let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { - let ret_val: uint64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) + let ret_val: uint32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u64)"] #[inline] #[cfg(target_endian = "little")] 
#[target_feature(enable = "neon")] @@ -45575,11 +53000,11 @@ pub fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { +pub fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -45597,8 +53022,8 @@ pub fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t { + let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { let ret_val: poly8x16_t = transmute(a); simd_shuffle!( @@ -45609,7 +53034,7 @@ pub fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -45627,11 +53052,11 @@ pub fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t { +pub fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { unsafe { transmute(a) } 
} #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -45649,15 +53074,15 @@ pub fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { + let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { let ret_val: poly16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -45675,11 +53100,11 @@ pub fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t { +pub fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -45697,14 +53122,15 @@ 
pub fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t { +pub fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t { + let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: float32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -45722,11 +53148,11 @@ pub fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t { +pub fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -45744,14 +53170,15 @@ pub fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t { +pub fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t { + let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: int8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret 
cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -45769,11 +53196,11 @@ pub fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t { +pub fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -45791,14 +53218,15 @@ pub fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t { +pub fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t { + let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: int16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -45816,11 +53244,11 @@ pub fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t { +pub fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -45838,15 +53266,17 @@ pub fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t { +pub fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { + let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: int32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -45862,11 +53292,34 @@ pub fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_u64(a: uint64x1_t) -> int64x1_t { +pub fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t { + let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; + unsafe { transmute(a) } +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -45884,11 +53337,11 @@ pub fn vreinterpret_s64_u64(a: uint64x1_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t { +pub fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -45906,14 +53359,15 @@ pub fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", 
issue = "111800") )] -pub fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t { +pub fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t { + let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: uint8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -45931,11 +53385,11 @@ pub fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t { +pub fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -45953,14 +53407,15 @@ pub fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t { +pub fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { + let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: uint16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -45978,11 +53433,11 @@ pub fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t { +pub fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -46000,14 +53455,15 @@ pub fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t { +pub fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t { + let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: uint32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -46025,11 +53481,11 @@ pub fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = 
"111800") )] -pub fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t { +pub fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -46047,14 +53503,12 @@ pub fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t { - unsafe { - let ret_val: poly8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t { + let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -46072,11 +53526,11 @@ pub fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t { +pub fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u64)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -46094,14 +53548,15 @@ pub fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t { +pub fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t { + let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: poly16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -46119,11 +53574,11 @@ pub fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t { +pub fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -46141,15 +53596,16 @@ pub fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t { - let a: uint64x2_t = unsafe { 
simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t { + let a: poly8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: float32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -46167,11 +53623,11 @@ pub fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { +pub fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -46189,8 +53645,9 @@ pub fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t { + let a: poly8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: int8x16_t = transmute(a); simd_shuffle!( @@ -46201,7 +53658,7 @@ pub fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { } } #[doc = "Vector 
reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -46219,11 +53676,11 @@ pub fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { +pub fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -46241,15 +53698,16 @@ pub fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { + let a: poly8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: int16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] 
@@ -46267,11 +53725,11 @@ pub fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { +pub fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -46289,15 +53747,16 @@ pub fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t { + let a: poly8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: int32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -46315,11 +53774,11 @@ pub fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t { +pub fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] 
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -46337,15 +53796,16 @@ pub fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t { + let a: poly8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: int64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -46363,11 +53823,11 @@ pub fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { +pub fn vreinterpretq_u8_p8(a: poly8x16_t) -> uint8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -46385,8 +53845,9 @@ pub fn vreinterpretq_u8_u64(a: 
uint64x2_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpretq_u8_p8(a: poly8x16_t) -> uint8x16_t { + let a: poly8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: uint8x16_t = transmute(a); simd_shuffle!( @@ -46397,7 +53858,7 @@ pub fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -46415,11 +53876,11 @@ pub fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t { +pub fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -46437,15 +53898,16 @@ pub fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t { + let a: 
poly8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: uint16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -46463,11 +53925,11 @@ pub fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { +pub fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -46485,15 +53947,16 @@ pub fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t { + let a: poly8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: uint32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -46511,11 +53974,11 @@ pub fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t { +pub fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -46533,19 +53996,16 @@ pub fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t { + let a: poly8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) + let ret_val: uint64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u64)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -46563,11 +54023,11 @@ pub fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { +pub fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -46585,15 +54045,16 @@ pub fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t { + let a: poly8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: poly16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -46611,11 +54072,11 @@ pub fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = 
"111800") )] -pub fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t { +pub fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -46633,15 +54094,15 @@ pub fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t { + let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { let ret_val: float32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -46659,11 +54120,11 @@ pub fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t { +pub fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p8)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -46681,15 +54142,15 @@ pub fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t { + let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { let ret_val: int8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -46707,11 +54168,11 @@ pub fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t { +pub fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -46729,15 +54190,15 @@ pub fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_p8(a: poly8x8_t) -> 
int16x4_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t { + let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { let ret_val: int16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -46755,11 +54216,11 @@ pub fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { +pub fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -46777,15 +54238,15 @@ pub fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t { + let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { let ret_val: int32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -46803,11 +54264,11 @@ pub fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t { +pub fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -46825,12 +54286,12 @@ pub fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t { + let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -46848,11 +54309,11 @@ pub fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] 
-pub fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t { +pub fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -46870,15 +54331,15 @@ pub fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { + let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { let ret_val: uint8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -46896,11 +54357,11 @@ pub fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { +pub fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p8)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -46918,15 +54379,15 @@ pub fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t { + let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { let ret_val: uint16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -46944,11 +54405,11 @@ pub fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t { +pub fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -46966,15 +54427,15 @@ pub fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_p8(a: poly8x8_t) -> 
uint32x2_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { + let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { let ret_val: uint32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -46992,11 +54453,11 @@ pub fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t { +pub fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -47014,12 +54475,12 @@ pub fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { + let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -47037,11 +54498,11 @@ pub fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t { +pub fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -47059,15 +54520,15 @@ pub fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { + let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { - let ret_val: poly16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + let ret_val: poly8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = 
"neon")] @@ -47085,11 +54546,11 @@ pub fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t { +pub fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -47107,16 +54568,15 @@ pub fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t { + let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: float32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -47134,11 +54594,11 @@ pub fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t { +pub fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { unsafe { transmute(a) } } #[doc = 
"Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -47156,9 +54616,8 @@ pub fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { + let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: int8x16_t = transmute(a); simd_shuffle!( @@ -47169,7 +54628,7 @@ pub fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t { } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -47187,11 +54646,11 @@ pub fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { +pub fn vreinterpretq_s16_p16(a: poly16x8_t) -> int16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p16)"] #[inline] 
#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -47209,16 +54668,15 @@ pub fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_s16_p16(a: poly16x8_t) -> int16x8_t { + let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: int16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -47236,11 +54694,11 @@ pub fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t { +pub fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -47258,16 +54716,15 @@ pub fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t { - let a: poly8x16_t = - unsafe { 
simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { + let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: int32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -47285,11 +54742,11 @@ pub fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t { +pub fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -47307,118 +54764,15 @@ pub fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { + let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { let ret_val: int64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast 
operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u8_p8(a: poly8x16_t) -> uint8x16_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u8_p8(a: poly8x16_t) -> uint8x16_t { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p8)"] -#[inline] -#[cfg(target_endian = "little")] 
-#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -47436,11 +54790,11 @@ pub fn vreinterpretq_u16_p8(a: poly8x16_t) -> 
uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t { +pub fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -47458,16 +54812,19 @@ pub fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t { + let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + let ret_val: uint8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -47485,11 +54842,11 @@ pub fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t { +pub fn vreinterpretq_u16_p16(a: 
poly16x8_t) -> uint16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -47507,16 +54864,15 @@ pub fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t { + let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { - let ret_val: uint64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) + let ret_val: uint16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -47534,11 +54890,11 @@ pub fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t { +pub fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p8)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -47556,16 +54912,15 @@ pub fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { + let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { - let ret_val: poly16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + let ret_val: uint32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -47583,11 +54938,11 @@ pub fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t { +pub fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -47605,15 +54960,15 @@ pub fn 
vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t { - let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { + let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { - let ret_val: float32x2_t = transmute(a); + let ret_val: uint64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] @@ -47631,11 +54986,11 @@ pub fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t { +pub fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] @@ -47653,19 +55008,23 @@ pub fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t { - let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { + let a: poly16x8_t = unsafe 
{ simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { - let ret_val: int8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + let ret_val: poly8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p128)"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -47679,15 +55038,15 @@ pub fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t { +pub fn vreinterpretq_s8_p128(a: p128) -> int8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p128)"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -47701,19 
+55060,22 @@ pub fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t { - let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_s8_p128(a: p128) -> int8x16_t { unsafe { - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + let ret_val: int8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p128)"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -47727,15 +55089,15 @@ pub fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t { +pub fn vreinterpretq_s16_p128(a: p128) -> int16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p128)"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -47749,19 +55111,18 @@ pub fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t { - let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_s16_p128(a: p128) -> int16x8_t { unsafe { - let ret_val: int32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) + let ret_val: int16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p128)"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -47775,15 +55136,15 @@ pub fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t { +pub fn vreinterpretq_s32_p128(a: p128) -> int32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p128)"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -47797,16 +55158,18 @@ pub fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t { - let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { transmute(a) } +pub fn vreinterpretq_s32_p128(a: p128) -> int32x4_t { + unsafe { + let ret_val: int32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p128)"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -47820,15 +55183,15 @@ pub fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn 
vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { +pub fn vreinterpretq_s64_p128(a: p128) -> int64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p128)"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -47842,19 +55205,18 @@ pub fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { - let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_s64_p128(a: p128) -> int64x2_t { unsafe { - let ret_val: uint8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + let ret_val: int64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p128)"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, 
any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -47868,15 +55230,15 @@ pub fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t { +pub fn vreinterpretq_u8_p128(a: p128) -> uint8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p128)"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -47890,19 +55252,22 @@ pub fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t { - let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_u8_p128(a: p128) -> uint8x16_t { unsafe { - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + let ret_val: uint8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p128)"] #[inline] #[cfg(target_endian = "little")] 
-#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -47916,15 +55281,15 @@ pub fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { +pub fn vreinterpretq_u16_p128(a: p128) -> uint16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p128)"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -47938,19 +55303,18 @@ pub fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { - let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_u16_p128(a: p128) -> uint16x8_t { unsafe { - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) + let ret_val: uint16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p128)"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -47964,15 +55328,15 @@ pub fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { +pub fn vreinterpretq_u32_p128(a: p128) -> uint32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p128)"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -47986,16 +55350,18 @@ pub fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { - let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { transmute(a) } +pub fn vreinterpretq_u32_p128(a: p128) -> 
uint32x4_t { + unsafe { + let ret_val: uint32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p128)"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -48009,15 +55375,15 @@ pub fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { +pub fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p128)"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -48031,19 +55397,18 @@ pub fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn 
vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { - let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t { unsafe { - let ret_val: poly8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + let ret_val: uint64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p128)"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -48057,15 +55422,15 @@ pub fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t { +pub fn vreinterpretq_p8_p128(a: p128) -> poly8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p128)"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( 
all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -48079,19 +55444,22 @@ pub fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_p8_p128(a: p128) -> poly8x16_t { unsafe { - let ret_val: float32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + let ret_val: poly8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p128)"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -48105,15 +55473,15 @@ pub fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { +pub fn vreinterpretq_p16_p128(a: p128) -> poly16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p128)"] #[inline] 
#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -48127,23 +55495,18 @@ pub fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_p16_p128(a: p128) -> poly16x8_t { unsafe { - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) + let ret_val: poly16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p128)"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -48157,15 +55520,15 @@ pub fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_p16(a: poly16x8_t) -> int16x8_t { +pub fn vreinterpretq_p64_p128(a: p128) -> poly64x2_t { unsafe { 
transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p128)"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -48179,19 +55542,18 @@ pub fn vreinterpretq_s16_p16(a: poly16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_p16(a: poly16x8_t) -> int16x8_t { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_p64_p128(a: p128) -> poly64x2_t { unsafe { - let ret_val: int16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + let ret_val: poly64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s8)"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -48205,15 +55567,15 @@ pub fn 
vreinterpretq_s16_p16(a: poly16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { +pub fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s8)"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -48227,19 +55589,16 @@ pub fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: int32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t { + let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s8)"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
+#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -48253,15 +55612,15 @@ pub fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { +pub fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s8)"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -48275,19 +55634,17 @@ pub fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: int64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 { + let a: int8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s8)"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -48301,15 +55658,15 @@ pub fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t { +pub fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s8)"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -48323,23 +55680,20 @@ pub fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t { 
+ let a: int8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) + let ret_val: poly64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s16)"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -48353,15 +55707,15 @@ pub fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t { +pub fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s16)"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = 
"aarch64", target_arch = "arm64ec")), @@ -48375,19 +55729,16 @@ pub fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { + let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s16)"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -48401,15 +55752,15 @@ pub fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { +pub fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s16)"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = 
"neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -48423,19 +55774,16 @@ pub fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { + let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s16)"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -48449,15 +55797,15 @@ pub fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { +pub fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s16)"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -48471,19 +55819,19 @@ pub fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { + let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { - let ret_val: uint64x2_t = transmute(a); + let ret_val: poly64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s32)"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -48497,15 +55845,15 @@ pub fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { target_arch = 
"arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { +pub fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s32)"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -48519,19 +55867,12 @@ pub fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { + let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] @@ -48549,11 +55890,11 @@ pub fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { target_arch 
= "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_p128(a: p128) -> int8x16_t { +pub fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] @@ -48571,18 +55912,12 @@ pub fn vreinterpretq_s8_p128(a: p128) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_p128(a: p128) -> int8x16_t { - unsafe { - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 { + let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] @@ -48600,11 +55935,11 @@ pub fn vreinterpretq_s8_p128(a: p128) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_p128(a: p128) -> int16x8_t { +pub fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p128)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] @@ -48622,14 +55957,15 @@ pub fn vreinterpretq_s16_p128(a: p128) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_p128(a: p128) -> int16x8_t { +pub fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { + let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { - let ret_val: int16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + let ret_val: poly64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] @@ -48647,11 +55983,11 @@ pub fn vreinterpretq_s16_p128(a: p128) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_p128(a: p128) -> int32x4_t { +pub fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] @@ -48669,14 +56005,12 @@ pub fn vreinterpretq_s32_p128(a: p128) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn 
vreinterpretq_s32_p128(a: p128) -> int32x4_t { - unsafe { - let ret_val: int32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { + let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] @@ -48694,11 +56028,11 @@ pub fn vreinterpretq_s32_p128(a: p128) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_p128(a: p128) -> int64x2_t { +pub fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] @@ -48716,14 +56050,12 @@ pub fn vreinterpretq_s64_p128(a: p128) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_p128(a: p128) -> int64x2_t { - unsafe { - let ret_val: int64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t { + let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] @@ -48741,11 +56073,11 @@ pub fn vreinterpretq_s64_p128(a: p128) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_p128(a: p128) -> uint8x16_t { +pub fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] @@ -48763,18 +56095,13 @@ pub fn vreinterpretq_u8_p128(a: p128) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_p128(a: p128) -> uint8x16_t { - unsafe { - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 { + let a: uint8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] @@ -48792,11 +56119,11 @@ pub fn 
vreinterpretq_u8_p128(a: p128) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_p128(a: p128) -> uint16x8_t { +pub fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] @@ -48814,14 +56141,16 @@ pub fn vreinterpretq_u16_p128(a: p128) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_p128(a: p128) -> uint16x8_t { +pub fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { + let a: uint8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { - let ret_val: uint16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + let ret_val: poly64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] @@ -48839,11 +56168,11 @@ pub fn vreinterpretq_u16_p128(a: p128) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_p128(a: p128) -> uint32x4_t { +pub fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc 
= "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] @@ -48861,14 +56190,12 @@ pub fn vreinterpretq_u32_p128(a: p128) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_p128(a: p128) -> uint32x4_t { - unsafe { - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { + let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] @@ -48886,11 +56213,11 @@ pub fn vreinterpretq_u32_p128(a: p128) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t { +pub fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] @@ -48908,14 +56235,12 @@ pub fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t { - unsafe { - let ret_val: uint64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { + let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] @@ -48933,11 +56258,11 @@ pub fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_p128(a: p128) -> poly8x16_t { +pub fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] @@ -48955,18 +56280,15 @@ pub fn vreinterpretq_p8_p128(a: p128) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_p128(a: p128) -> poly8x16_t { +pub fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t { + let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) + let ret_val: poly64x2_t = transmute(a); + 
simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] @@ -48984,11 +56306,11 @@ pub fn vreinterpretq_p8_p128(a: p128) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_p128(a: p128) -> poly16x8_t { +pub fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] @@ -49006,14 +56328,12 @@ pub fn vreinterpretq_p16_p128(a: p128) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_p128(a: p128) -> poly16x8_t { - unsafe { - let ret_val: poly16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { + let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] @@ -49031,11 
+56351,11 @@ pub fn vreinterpretq_p16_p128(a: p128) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_p128(a: p128) -> poly64x2_t { +pub fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] @@ -49053,14 +56373,12 @@ pub fn vreinterpretq_p64_p128(a: p128) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_p128(a: p128) -> poly64x2_t { - unsafe { - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 { + let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] @@ -49078,11 +56396,11 @@ pub fn vreinterpretq_p64_p128(a: p128) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t { +pub fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] @@ -49100,12 +56418,15 @@ pub fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t { - let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { transmute(a) } +pub fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { + let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; + unsafe { + let ret_val: poly64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] @@ -49123,11 +56444,11 @@ pub fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 { +pub fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] @@ -49145,13 +56466,12 @@ pub fn vreinterpretq_p128_s8(a: 
int8x16_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { + let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] @@ -49169,11 +56489,11 @@ pub fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t { +pub fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] @@ -49191,16 +56511,12 @@ pub fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t { + let a: 
poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] @@ -49218,11 +56534,11 @@ pub fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { +pub fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] @@ -49240,12 +56556,13 @@ pub fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { - let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 { + let a: poly8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p8)"] #[inline] #[cfg(target_endian = "little")] 
#[target_feature(enable = "neon,aes")] @@ -49263,11 +56580,11 @@ pub fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { +pub fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] @@ -49285,12 +56602,16 @@ pub fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { transmute(a) } +pub fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { + let a: poly8x16_t = + unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; + unsafe { + let ret_val: poly64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] @@ -49308,11 +56629,11 @@ pub fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { +pub fn vreinterpret_p64_p16(a: poly16x4_t) -> 
poly64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] @@ -49330,15 +56651,12 @@ pub fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t { + let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] @@ -49356,11 +56674,11 @@ pub fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { +pub fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p16)"] #[inline] 
#[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] @@ -49378,12 +56696,12 @@ pub fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { + let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] @@ -49401,11 +56719,11 @@ pub fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 { +pub fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] @@ -49423,12 +56741,15 @@ pub fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { transmute(a) } +pub fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { + let a: poly16x8_t 
= unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; + unsafe { + let ret_val: poly64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] @@ -49446,11 +56767,11 @@ pub fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { +pub fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] @@ -49468,15 +56789,14 @@ pub fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t { unsafe { - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) + let ret_val: int8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s64)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] @@ -49494,11 +56814,11 @@ pub fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { +pub fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] @@ -49516,12 +56836,14 @@ pub fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { transmute(a) } +pub fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t { + unsafe { + let ret_val: int16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] @@ -49539,11 +56861,11 @@ pub fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t { +pub fn vreinterpret_s32_p64(a: 
poly64x1_t) -> int32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] @@ -49561,12 +56883,14 @@ pub fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { transmute(a) } +pub fn vreinterpret_s32_p64(a: poly64x1_t) -> int32x2_t { + unsafe { + let ret_val: int32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] @@ -49584,11 +56908,11 @@ pub fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 { +pub fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] @@ -49606,13 
+56930,14 @@ pub fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { transmute(a) } +pub fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t { + unsafe { + let ret_val: uint8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] @@ -49630,11 +56955,11 @@ pub fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { +pub fn vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] @@ -49652,16 +56977,14 @@ pub fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t 
{ unsafe { - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) + let ret_val: uint16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] @@ -49679,11 +57002,11 @@ pub fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { +pub fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] @@ -49701,12 +57024,14 @@ pub fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { transmute(a) } +pub fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t { + unsafe { + let ret_val: uint32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u16)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] @@ -49724,11 +57049,11 @@ pub fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { +pub fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] @@ -49746,12 +57071,14 @@ pub fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { transmute(a) } +pub fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t { + unsafe { + let ret_val: poly8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] @@ -49769,11 +57096,11 @@ pub fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t { 
+pub fn vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] @@ -49791,15 +57118,14 @@ pub fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t { unsafe { - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) + let ret_val: poly16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] @@ -49817,11 +57143,11 @@ pub fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { +pub fn vreinterpretq_p128_p64(a: poly64x2_t) -> p128 { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u32)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] @@ -49839,12 +57165,12 @@ pub fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpretq_p128_p64(a: poly64x2_t) -> p128 { + let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] @@ -49862,11 +57188,11 @@ pub fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 { +pub fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] @@ -49884,12 +57210,19 @@ pub fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - 
unsafe { transmute(a) } +pub fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t { + let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; + unsafe { + let ret_val: int8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] @@ -49907,11 +57240,11 @@ pub fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { +pub fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] @@ -49929,15 +57262,15 @@ pub fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t { + let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) + let ret_val: int16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } 
} #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] @@ -49955,11 +57288,11 @@ pub fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { +pub fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] @@ -49977,12 +57310,15 @@ pub fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { transmute(a) } +pub fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { + let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; + unsafe { + let ret_val: int32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] @@ 
-50000,11 +57336,11 @@ pub fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t { +pub fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] @@ -50022,12 +57358,19 @@ pub fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { transmute(a) } +pub fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t { + let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; + unsafe { + let ret_val: uint8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] @@ -50045,11 +57388,11 @@ pub fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 { +pub fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t { unsafe { transmute(a) } } 
#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] @@ -50067,13 +57410,15 @@ pub fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { transmute(a) } +pub fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t { + let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; + unsafe { + let ret_val: uint16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] @@ -50091,11 +57436,11 @@ pub fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { +pub fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p64)"] #[inline] #[cfg(target_endian 
= "big")] #[target_feature(enable = "neon,aes")] @@ -50113,16 +57458,15 @@ pub fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { + let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) + let ret_val: uint32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] @@ -50140,11 +57484,11 @@ pub fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t { +pub fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] @@ -50162,12 +57506,19 @@ pub fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_p16(a: poly16x4_t) 
-> poly64x1_t { - let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { transmute(a) } +pub fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t { + let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; + unsafe { + let ret_val: poly8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p64)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] @@ -50185,11 +57536,11 @@ pub fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { +pub fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p64)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] @@ -50207,20 +57558,22 @@ pub fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p16)"] -#[inline] 
-#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +pub fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t { + let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; + unsafe { + let ret_val: poly16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev16_p8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev16) )] #[cfg_attr( not(target_arch = "arm"), @@ -50230,19 +57583,18 @@ pub fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { - unsafe { transmute(a) } +pub fn vrev16_p8(a: poly8x8_t) -> poly8x8_t { + unsafe { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p16)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev16_s8)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, 
target_arch = "arm"), assert_instr("vrev16.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev16) )] #[cfg_attr( not(target_arch = "arm"), @@ -50252,23 +57604,18 @@ pub fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vrev16_s8(a: int8x8_t) -> int8x8_t { + unsafe { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev16_u8)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev16) )] #[cfg_attr( not(target_arch = "arm"), @@ -50278,19 +57625,18 @@ pub fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t { - unsafe { transmute(a) } +pub fn vrev16_u8(a: uint8x8_t) -> uint8x8_t { + unsafe { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } } -#[doc = 
"Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev16q_p8)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev16) )] #[cfg_attr( not(target_arch = "arm"), @@ -50300,22 +57646,18 @@ pub fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t { - unsafe { - let ret_val: int8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vrev16q_p8(a: poly8x16_t) -> poly8x16_t { + unsafe { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev16q_s8)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev16) )] #[cfg_attr( not(target_arch = "arm"), @@ -50325,19 +57667,18 @@ pub fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t { - unsafe { transmute(a) } +pub fn vrev16q_s8(a: int8x16_t) -> int8x16_t { + unsafe { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev16q_u8)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev16) )] #[cfg_attr( not(target_arch = "arm"), @@ -50347,22 +57688,18 @@ pub fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t { - unsafe { - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vrev16q_u8(a: uint8x16_t) -> uint8x16_t { + unsafe { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 
10, 13, 12, 15, 14]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32_p16)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -50372,19 +57709,18 @@ pub fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_p64(a: poly64x1_t) -> int32x2_t { - unsafe { transmute(a) } +pub fn vrev32_p16(a: poly16x4_t) -> poly16x4_t { + unsafe { simd_shuffle!(a, a, [1, 0, 3, 2]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32_p8)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] 
#[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -50394,22 +57730,18 @@ pub fn vreinterpret_s32_p64(a: poly64x1_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_p64(a: poly64x1_t) -> int32x2_t { - unsafe { - let ret_val: int32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vrev32_p8(a: poly8x8_t) -> poly8x8_t { + unsafe { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32_s16)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -50419,19 +57751,18 @@ pub fn vreinterpret_s32_p64(a: poly64x1_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t { - unsafe { transmute(a) } +pub fn vrev32_s16(a: int16x4_t) -> int16x4_t { + unsafe { simd_shuffle!(a, a, [1, 0, 3, 2]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32_s8)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -50441,22 +57772,18 @@ pub fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t { - unsafe { - let ret_val: uint8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vrev32_s8(a: int8x8_t) -> int8x8_t { + unsafe { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32_u16)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] 
#[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -50466,19 +57793,18 @@ pub fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t { - unsafe { transmute(a) } +pub fn vrev32_u16(a: uint16x4_t) -> uint16x4_t { + unsafe { simd_shuffle!(a, a, [1, 0, 3, 2]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32_u8)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -50488,22 +57814,18 @@ pub fn vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t { - unsafe { - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vrev32_u8(a: uint8x8_t) -> uint8x8_t { + unsafe { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32q_p16)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -50513,19 +57835,18 @@ pub fn vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t { - unsafe { transmute(a) } +pub fn vrev32q_p16(a: poly16x8_t) -> poly16x8_t { + unsafe { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32q_p8)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), 
- assert_instr(nop) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -50535,22 +57856,18 @@ pub fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t { - unsafe { - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vrev32q_p8(a: poly8x16_t) -> poly8x16_t { + unsafe { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32q_s16)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -50560,19 +57877,18 @@ pub fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t { - unsafe { transmute(a) } +pub fn vrev32q_s16(a: int16x8_t) -> int16x8_t { + unsafe { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p64)"] +#[doc = 
"Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32q_s8)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -50582,22 +57898,18 @@ pub fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t { - unsafe { - let ret_val: poly8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vrev32q_s8(a: int8x16_t) -> int8x16_t { + unsafe { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32q_u16)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - 
assert_instr(nop) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -50607,19 +57919,18 @@ pub fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t { - unsafe { transmute(a) } +pub fn vrev32q_u16(a: uint16x8_t) -> uint16x8_t { + unsafe { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32q_u8)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -50629,22 +57940,18 @@ pub fn vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t { - unsafe { - let ret_val: poly16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vrev32q_u8(a: uint8x16_t) -> uint8x16_t { + unsafe { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p64)"] +#[doc 
= "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_f32)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -50654,19 +57961,18 @@ pub fn vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_p64(a: poly64x2_t) -> p128 { - unsafe { transmute(a) } +pub fn vrev64_f32(a: float32x2_t) -> float32x2_t { + unsafe { simd_shuffle!(a, a, [1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_p16)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -50676,20 +57982,18 @@ pub fn 
vreinterpretq_p128_p64(a: poly64x2_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_p64(a: poly64x2_t) -> p128 { - let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { transmute(a) } +pub fn vrev64_p16(a: poly16x4_t) -> poly16x4_t { + unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_p8)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -50699,19 +58003,18 @@ pub fn vreinterpretq_p128_p64(a: poly64x2_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t { - unsafe { transmute(a) } +pub fn vrev64_p8(a: poly8x8_t) -> poly8x8_t { + unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_s16)"] #[inline] 
-#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -50721,27 +58024,18 @@ pub fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t { - let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vrev64_s16(a: int16x4_t) -> int16x4_t { + unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_s32)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -50751,19 +58045,18 @@ 
pub fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t { - unsafe { transmute(a) } +pub fn vrev64_s32(a: int32x2_t) -> int32x2_t { + unsafe { simd_shuffle!(a, a, [1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_s8)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -50773,23 +58066,18 @@ pub fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t { - let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: int16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vrev64_s8(a: int8x8_t) -> int8x8_t { + unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_u16)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -50799,19 +58087,18 @@ pub fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { - unsafe { transmute(a) } +pub fn vrev64_u16(a: uint16x4_t) -> uint16x4_t { + unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_u32)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -50821,23 +58108,18 @@ pub fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { target_arch = "arm", 
unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { - let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: int32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vrev64_u32(a: uint32x2_t) -> uint32x2_t { + unsafe { simd_shuffle!(a, a, [1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_u8)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -50847,19 +58129,18 @@ pub fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t { - unsafe { transmute(a) } +pub fn vrev64_u8(a: uint8x8_t) -> uint8x8_t { + unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_f32)"] #[inline] 
-#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -50869,27 +58150,18 @@ pub fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t { - let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vrev64q_f32(a: float32x4_t) -> float32x4_t { + unsafe { simd_shuffle!(a, a, [1, 0, 3, 2]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_p16)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -50899,19 
+58171,18 @@ pub fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t { - unsafe { transmute(a) } +pub fn vrev64q_p16(a: poly16x8_t) -> poly16x8_t { + unsafe { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_p8)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -50921,23 +58192,18 @@ pub fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t { - let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: uint16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vrev64q_p8(a: poly8x16_t) -> poly8x16_t { + unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p64)"] +#[doc = "Reversing 
vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_s16)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -50947,19 +58213,18 @@ pub fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { - unsafe { transmute(a) } +pub fn vrev64q_s16(a: int16x8_t) -> int16x8_t { + unsafe { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_s32)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -50969,23 +58234,18 @@ pub fn 
vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { - let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vrev64q_s32(a: int32x4_t) -> int32x4_t { + unsafe { simd_shuffle!(a, a, [1, 0, 3, 2]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_s8)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -50995,19 +58255,18 @@ pub fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t { - unsafe { transmute(a) } +pub fn vrev64q_s8(a: int8x16_t) -> int8x16_t { + unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_u16)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -51017,27 +58276,18 @@ pub fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t { - let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vrev64q_u16(a: uint16x8_t) -> uint16x8_t { + unsafe { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_u32)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", 
target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -51047,19 +58297,18 @@ pub fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t { - unsafe { transmute(a) } +pub fn vrev64q_u32(a: uint32x4_t) -> uint32x4_t { + unsafe { simd_shuffle!(a, a, [1, 0, 3, 2]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_u8)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -51069,12 +58318,8 @@ pub fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t { - let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: poly16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vrev64q_u8(a: uint8x16_t) -> uint8x16_t { + unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8]) } } #[doc = "Reverse elements in 64-bit doublewords"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_f16)"] @@ -64294,6 +71539,29 @@ pub unsafe fn vst4_p16(a: *mut p16, b: poly16x4x4_t) { pub unsafe fn vst4q_p16(a: *mut p16, b: poly16x8x4_t) { vst4q_s16(transmute(a), transmute(b)) } +#[doc = "Store SIMD&FP register (immediate offset)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstrq_p128)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vstrq_p128(a: *mut p128, b: p128) { + *a = b +} #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f16)"] #[inline] diff --git a/crates/core_arch/src/arm_shared/neon/mod.rs b/crates/core_arch/src/arm_shared/neon/mod.rs index e424f0eb0f..7a3e727b8c 100644 --- a/crates/core_arch/src/arm_shared/neon/mod.rs +++ b/crates/core_arch/src/arm_shared/neon/mod.rs @@ -1190,7099 +1190,6 @@ impl_sign_conversions_neon! { (uint8x8x4_t, int8x8x4_t) } -/// Load one single-element structure to one lane of one register. 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 7))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 7) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1_lane_s8(ptr: *const i8, src: int8x8_t) -> int8x8_t { - static_assert_uimm_bits!(LANE, 3); - simd_insert!(src, LANE as u32, *ptr) -} - -/// Load one single-element structure to one lane of one register. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 15))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 15) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1q_lane_s8(ptr: *const i8, src: int8x16_t) -> int8x16_t { - static_assert_uimm_bits!(LANE, 4); - simd_insert!(src, LANE as u32, *ptr) -} - -/// Load one single-element structure to one lane of one register. 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1_lane_s16(ptr: *const i16, src: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(LANE, 2); - simd_insert!(src, LANE as u32, *ptr) -} - -/// Load one single-element structure to one lane of one register. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 7))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 7) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1q_lane_s16(ptr: *const i16, src: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(LANE, 3); - simd_insert!(src, LANE as u32, *ptr) -} - -/// Load one single-element structure to one lane of one register. 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1_lane_s32(ptr: *const i32, src: int32x2_t) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - simd_insert!(src, LANE as u32, *ptr) -} - -/// Load one single-element structure to one lane of one register. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1q_lane_s32(ptr: *const i32, src: int32x4_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - simd_insert!(src, LANE as u32, *ptr) -} - -/// Load one single-element structure to one lane of one register. 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ldr, LANE = 0) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1_lane_s64(ptr: *const i64, src: int64x1_t) -> int64x1_t { - static_assert!(LANE == 0); - simd_insert!(src, LANE as u32, *ptr) -} - -/// Load one single-element structure to one lane of one register. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1q_lane_s64(ptr: *const i64, src: int64x2_t) -> int64x2_t { - static_assert_uimm_bits!(LANE, 1); - simd_insert!(src, LANE as u32, *ptr) -} - -/// Load one single-element structure to one lane of one register. 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 7))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 7) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1_lane_u8(ptr: *const u8, src: uint8x8_t) -> uint8x8_t { - static_assert_uimm_bits!(LANE, 3); - simd_insert!(src, LANE as u32, *ptr) -} - -/// Load one single-element structure to one lane of one register. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 15))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 15) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1q_lane_u8(ptr: *const u8, src: uint8x16_t) -> uint8x16_t { - static_assert_uimm_bits!(LANE, 4); - simd_insert!(src, LANE as u32, *ptr) -} - -/// Load one single-element structure to one lane of one register. 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1_lane_u16(ptr: *const u16, src: uint16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(LANE, 2); - simd_insert!(src, LANE as u32, *ptr) -} - -/// Load one single-element structure to one lane of one register. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 7))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 7) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1q_lane_u16(ptr: *const u16, src: uint16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(LANE, 3); - simd_insert!(src, LANE as u32, *ptr) -} - -/// Load one single-element structure to one lane of one register. 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1_lane_u32(ptr: *const u32, src: uint32x2_t) -> uint32x2_t { - static_assert_uimm_bits!(LANE, 1); - simd_insert!(src, LANE as u32, *ptr) -} - -/// Load one single-element structure to one lane of one register. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1q_lane_u32(ptr: *const u32, src: uint32x4_t) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - simd_insert!(src, LANE as u32, *ptr) -} - -/// Load one single-element structure to one lane of one register. 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ldr, LANE = 0) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1_lane_u64(ptr: *const u64, src: uint64x1_t) -> uint64x1_t { - static_assert!(LANE == 0); - simd_insert!(src, LANE as u32, *ptr) -} - -/// Load one single-element structure to one lane of one register. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1q_lane_u64(ptr: *const u64, src: uint64x2_t) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 1); - simd_insert!(src, LANE as u32, *ptr) -} - -/// Load one single-element structure to one lane of one register. 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 7))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 7) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1_lane_p8(ptr: *const p8, src: poly8x8_t) -> poly8x8_t { - static_assert_uimm_bits!(LANE, 3); - simd_insert!(src, LANE as u32, *ptr) -} - -/// Load one single-element structure to one lane of one register. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 15))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 15) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1q_lane_p8(ptr: *const p8, src: poly8x16_t) -> poly8x16_t { - static_assert_uimm_bits!(LANE, 4); - simd_insert!(src, LANE as u32, *ptr) -} - -/// Load one single-element structure to one lane of one register. 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1_lane_p16(ptr: *const p16, src: poly16x4_t) -> poly16x4_t { - static_assert_uimm_bits!(LANE, 2); - simd_insert!(src, LANE as u32, *ptr) -} - -/// Load one single-element structure to one lane of one register. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 7))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 7) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1q_lane_p16(ptr: *const p16, src: poly16x8_t) -> poly16x8_t { - static_assert_uimm_bits!(LANE, 3); - simd_insert!(src, LANE as u32, *ptr) -} - -/// Load one single-element structure to one lane of one register. 
-/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_p64) -#[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ldr, LANE = 0) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1_lane_p64(ptr: *const p64, src: poly64x1_t) -> poly64x1_t { - static_assert!(LANE == 0); - simd_insert!(src, LANE as u32, *ptr) -} - -/// Load one single-element structure to one lane of one register. -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_p64) -#[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1q_lane_p64(ptr: *const p64, src: poly64x2_t) -> poly64x2_t { - static_assert_uimm_bits!(LANE, 1); - simd_insert!(src, LANE as u32, *ptr) -} - -/// Load one single-element structure to one lane of one register. 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1_lane_f32(ptr: *const f32, src: float32x2_t) -> float32x2_t { - static_assert_uimm_bits!(LANE, 1); - simd_insert!(src, LANE as u32, *ptr) -} - -/// Load one single-element structure to one lane of one register. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1q_lane_f32(ptr: *const f32, src: float32x4_t) -> float32x4_t { - static_assert_uimm_bits!(LANE, 2); - simd_insert!(src, LANE as u32, *ptr) -} - -/// Load one single-element structure and Replicate to all lanes (of one register). 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1_dup_s8(ptr: *const i8) -> int8x8_t { - let x = vld1_lane_s8::<0>(ptr, transmute(i8x8::splat(0))); - simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) -} - -/// Load one single-element structure and Replicate to all lanes (of one register). -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1q_dup_s8(ptr: *const i8) -> int8x16_t { - let x = vld1q_lane_s8::<0>(ptr, transmute(i8x16::splat(0))); - simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) -} - -/// Load one single-element structure and Replicate to all lanes (of one register). 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1_dup_s16(ptr: *const i16) -> int16x4_t { - let x = vld1_lane_s16::<0>(ptr, transmute(i16x4::splat(0))); - simd_shuffle!(x, x, [0, 0, 0, 0]) -} - -/// Load one single-element structure and Replicate to all lanes (of one register). -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1q_dup_s16(ptr: *const i16) -> int16x8_t { - let x = vld1q_lane_s16::<0>(ptr, transmute(i16x8::splat(0))); - simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) -} - -/// Load one single-element structure and Replicate to all lanes (of one register). 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1_dup_s32(ptr: *const i32) -> int32x2_t { - let x = vld1_lane_s32::<0>(ptr, transmute(i32x2::splat(0))); - simd_shuffle!(x, x, [0, 0]) -} - -/// Load one single-element structure and Replicate to all lanes (of one register). -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1q_dup_s32(ptr: *const i32) -> int32x4_t { - let x = vld1q_lane_s32::<0>(ptr, transmute(i32x4::splat(0))); - simd_shuffle!(x, x, [0, 0, 0, 0]) -} - -/// Load one single-element structure and Replicate to all lanes (of one register). 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ldr) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1_dup_s64(ptr: *const i64) -> int64x1_t { - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - crate::core_arch::aarch64::vld1_s64(ptr) - } - #[cfg(target_arch = "arm")] - { - crate::core_arch::arm::vld1_s64(ptr) - } -} - -/// Load one single-element structure and Replicate to all lanes (of one register). -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1q_dup_s64(ptr: *const i64) -> int64x2_t { - let x = vld1q_lane_s64::<0>(ptr, transmute(i64x2::splat(0))); - simd_shuffle!(x, x, [0, 0]) -} - -/// Load one single-element structure and Replicate to all lanes (of one register). 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1_dup_u8(ptr: *const u8) -> uint8x8_t { - let x = vld1_lane_u8::<0>(ptr, transmute(u8x8::splat(0))); - simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) -} - -/// Load one single-element structure and Replicate to all lanes (of one register). -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1q_dup_u8(ptr: *const u8) -> uint8x16_t { - let x = vld1q_lane_u8::<0>(ptr, transmute(u8x16::splat(0))); - simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) -} - -/// Load one single-element structure and Replicate to all lanes (of one register). 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1_dup_u16(ptr: *const u16) -> uint16x4_t { - let x = vld1_lane_u16::<0>(ptr, transmute(u16x4::splat(0))); - simd_shuffle!(x, x, [0, 0, 0, 0]) -} - -/// Load one single-element structure and Replicate to all lanes (of one register). -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1q_dup_u16(ptr: *const u16) -> uint16x8_t { - let x = vld1q_lane_u16::<0>(ptr, transmute(u16x8::splat(0))); - simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) -} - -/// Load one single-element structure and Replicate to all lanes (of one register). 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1_dup_u32(ptr: *const u32) -> uint32x2_t { - let x = vld1_lane_u32::<0>(ptr, transmute(u32x2::splat(0))); - simd_shuffle!(x, x, [0, 0]) -} - -/// Load one single-element structure and Replicate to all lanes (of one register). -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1q_dup_u32(ptr: *const u32) -> uint32x4_t { - let x = vld1q_lane_u32::<0>(ptr, transmute(u32x4::splat(0))); - simd_shuffle!(x, x, [0, 0, 0, 0]) -} - -/// Load one single-element structure and Replicate to all lanes (of one register). 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ldr) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1_dup_u64(ptr: *const u64) -> uint64x1_t { - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - crate::core_arch::aarch64::vld1_u64(ptr) - } - #[cfg(target_arch = "arm")] - { - crate::core_arch::arm::vld1_u64(ptr) - } -} - -/// Load one single-element structure and Replicate to all lanes (of one register). -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1q_dup_u64(ptr: *const u64) -> uint64x2_t { - let x = vld1q_lane_u64::<0>(ptr, transmute(u64x2::splat(0))); - simd_shuffle!(x, x, [0, 0]) -} - -/// Load one single-element structure and Replicate to all lanes (of one register). 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1_dup_p8(ptr: *const p8) -> poly8x8_t { - let x = vld1_lane_p8::<0>(ptr, transmute(u8x8::splat(0))); - simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) -} - -/// Load one single-element structure and Replicate to all lanes (of one register). -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1q_dup_p8(ptr: *const p8) -> poly8x16_t { - let x = vld1q_lane_p8::<0>(ptr, transmute(u8x16::splat(0))); - simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) -} - -/// Load one single-element structure and Replicate to all lanes (of one register). 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1_dup_p16(ptr: *const p16) -> poly16x4_t { - let x = vld1_lane_p16::<0>(ptr, transmute(u16x4::splat(0))); - simd_shuffle!(x, x, [0, 0, 0, 0]) -} - -/// Load one single-element structure and Replicate to all lanes (of one register). -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1q_dup_p16(ptr: *const p16) -> poly16x8_t { - let x = vld1q_lane_p16::<0>(ptr, transmute(u16x8::splat(0))); - simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) -} - -/// Load one single-element structure and Replicate to all lanes (of one register). 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1_dup_f32(ptr: *const f32) -> float32x2_t { - let x = vld1_lane_f32::<0>(ptr, transmute(f32x2::splat(0.))); - simd_shuffle!(x, x, [0, 0]) -} - -/// Load one single-element structure and Replicate to all lanes (of one register). -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_p64) -#[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ldr) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1_dup_p64(ptr: *const p64) -> poly64x1_t { - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - crate::core_arch::aarch64::vld1_p64(ptr) - } - #[cfg(target_arch = "arm")] - { - crate::core_arch::arm::vld1_p64(ptr) - } -} - -/// Load one single-element structure and Replicate to all lanes (of one register). 
-/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_p64) -#[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1q_dup_p64(ptr: *const p64) -> poly64x2_t { - let x = vld1q_lane_p64::<0>(ptr, transmute(u64x2::splat(0))); - simd_shuffle!(x, x, [0, 0]) -} - -/// Load one single-element structure and Replicate to all lanes (of one register). -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1q_dup_f32(ptr: *const f32) -> float32x4_t { - let x = vld1q_lane_f32::<0>(ptr, transmute(f32x4::splat(0.))); - simd_shuffle!(x, x, [0, 0, 0, 0]) -} - -// signed absolute difference and accumulate (64-bit) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr("saba") -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - 
target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaba_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { - unsafe { simd_add(a, vabd_s8(b, c)) } -} -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr("saba") -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaba_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { - unsafe { simd_add(a, vabd_s16(b, c)) } -} -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr("saba") -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaba_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { - unsafe { simd_add(a, vabd_s32(b, c)) } -} -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr("uaba") -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaba_u8(a: uint8x8_t, b: uint8x8_t, c: 
uint8x8_t) -> uint8x8_t { - unsafe { simd_add(a, vabd_u8(b, c)) } -} -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr("uaba") -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaba_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { - unsafe { simd_add(a, vabd_u16(b, c)) } -} -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr("uaba") -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaba_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { - unsafe { simd_add(a, vabd_u32(b, c)) } -} -// signed absolute difference and accumulate (128-bit) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr("saba") -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vabaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { - unsafe { simd_add(a, vabdq_s8(b, c)) } -} 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr("saba") -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vabaq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - unsafe { simd_add(a, vabdq_s16(b, c)) } -} -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr("saba") -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vabaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - unsafe { simd_add(a, vabdq_s32(b, c)) } -} -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr("uaba") -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vabaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { - unsafe { simd_add(a, vabdq_u8(b, c)) } -} -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr("uaba") -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vabaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { - unsafe { simd_add(a, vabdq_u16(b, c)) } -} -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr("uaba") -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vabaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - unsafe { simd_add(a, vabdq_u32(b, c)) } -} - -/// Vector add. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(add) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe { simd_add(a, b) } -} - -/// Vector add. 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(add) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe { simd_add(a, b) } -} - -/// Vector add. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(add) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe { simd_add(a, b) } -} - -/// Vector add. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(add) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe { simd_add(a, b) } -} - -/// Vector add. 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(add) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe { simd_add(a, b) } -} - -/// Vector add. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(add) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe { simd_add(a, b) } -} - -/// Vector add. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(add) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - unsafe { simd_add(a, b) } -} - -/// Vector add. 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(add) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe { simd_add(a, b) } -} - -/// Vector add. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(add) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe { simd_add(a, b) } -} - -/// Vector add. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(add) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe { simd_add(a, b) } -} - -/// Vector add. 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(add) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe { simd_add(a, b) } -} - -/// Vector add. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(add) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe { simd_add(a, b) } -} - -/// Vector add. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(add) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe { simd_add(a, b) } -} - -/// Vector add. 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(add) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - unsafe { simd_add(a, b) } -} - -/// Vector add. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe { simd_add(a, b) } -} - -/// Vector add. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe { simd_add(a, b) } -} - -/// Signed Add Long (vector). 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(saddl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { - unsafe { - let a: int16x8_t = simd_cast(a); - let b: int16x8_t = simd_cast(b); - simd_add(a, b) - } -} - -/// Signed Add Long (vector). -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(saddl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { - unsafe { - let a: int32x4_t = simd_cast(a); - let b: int32x4_t = simd_cast(b); - simd_add(a, b) - } -} - -/// Signed Add Long (vector). 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(saddl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { - unsafe { - let a: int64x2_t = simd_cast(a); - let b: int64x2_t = simd_cast(b); - simd_add(a, b) - } -} - -/// Unsigned Add Long (vector). -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uaddl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { - unsafe { - let a: uint16x8_t = simd_cast(a); - let b: uint16x8_t = simd_cast(b); - simd_add(a, b) - } -} - -/// Unsigned Add Long (vector). 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uaddl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { - unsafe { - let a: uint32x4_t = simd_cast(a); - let b: uint32x4_t = simd_cast(b); - simd_add(a, b) - } -} - -/// Unsigned Add Long (vector). -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uaddl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { - unsafe { - let a: uint64x2_t = simd_cast(a); - let b: uint64x2_t = simd_cast(b); - simd_add(a, b) - } -} - -/// Signed Add Long (vector, high half). 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(saddl2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { - unsafe { - let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - let a: int16x8_t = simd_cast(a); - let b: int16x8_t = simd_cast(b); - simd_add(a, b) - } -} - -/// Signed Add Long (vector, high half). -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(saddl2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { - unsafe { - let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - let a: int32x4_t = simd_cast(a); - let b: int32x4_t = simd_cast(b); - simd_add(a, b) - } -} - -/// Signed Add Long (vector, high half). 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(saddl2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { - unsafe { - let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); - let b: int32x2_t = simd_shuffle!(b, b, [2, 3]); - let a: int64x2_t = simd_cast(a); - let b: int64x2_t = simd_cast(b); - simd_add(a, b) - } -} - -/// Unsigned Add Long (vector, high half). -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uaddl2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { - unsafe { - let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - let a: uint16x8_t = simd_cast(a); - let b: uint16x8_t = simd_cast(b); - simd_add(a, b) - } -} - -/// Unsigned Add Long (vector, high half). 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uaddl2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { - unsafe { - let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - let a: uint32x4_t = simd_cast(a); - let b: uint32x4_t = simd_cast(b); - simd_add(a, b) - } -} - -/// Unsigned Add Long (vector, high half). -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uaddl2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { - unsafe { - let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]); - let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]); - let a: uint64x2_t = simd_cast(a); - let b: uint64x2_t = simd_cast(b); - simd_add(a, b) - } -} - -/// Signed Add Wide. 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(saddw) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddw_s8(a: int16x8_t, b: int8x8_t) -> int16x8_t { - unsafe { - let b: int16x8_t = simd_cast(b); - simd_add(a, b) - } -} - -/// Signed Add Wide. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(saddw) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddw_s16(a: int32x4_t, b: int16x4_t) -> int32x4_t { - unsafe { - let b: int32x4_t = simd_cast(b); - simd_add(a, b) - } -} - -/// Signed Add Wide. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(saddw) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddw_s32(a: int64x2_t, b: int32x2_t) -> int64x2_t { - unsafe { - let b: int64x2_t = simd_cast(b); - simd_add(a, b) - } -} - -/// Unsigned Add Wide. 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uaddw) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t { - unsafe { - let b: uint16x8_t = simd_cast(b); - simd_add(a, b) - } -} - -/// Unsigned Add Wide. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uaddw) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddw_u16(a: uint32x4_t, b: uint16x4_t) -> uint32x4_t { - unsafe { - let b: uint32x4_t = simd_cast(b); - simd_add(a, b) - } -} - -/// Unsigned Add Wide. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uaddw) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddw_u32(a: uint64x2_t, b: uint32x2_t) -> uint64x2_t { - unsafe { - let b: uint64x2_t = simd_cast(b); - simd_add(a, b) - } -} - -/// Signed Add Wide (high half). 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(saddw2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { - unsafe { - let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - let b: int16x8_t = simd_cast(b); - simd_add(a, b) - } -} - -/// Signed Add Wide (high half). -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(saddw2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { - unsafe { - let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - let b: int32x4_t = simd_cast(b); - simd_add(a, b) - } -} - -/// Signed Add Wide (high half). 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(saddw2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { - unsafe { - let b: int32x2_t = simd_shuffle!(b, b, [2, 3]); - let b: int64x2_t = simd_cast(b); - simd_add(a, b) - } -} - -/// Unsigned Add Wide (high half). -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uaddw2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { - unsafe { - let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint16x8_t = simd_cast(b); - simd_add(a, b) - } -} - -/// Unsigned Add Wide (high half). 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uaddw2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { - unsafe { - let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - let b: uint32x4_t = simd_cast(b); - simd_add(a, b) - } -} - -/// Unsigned Add Wide (high half). -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uaddw2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { - unsafe { - let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]); - let b: uint64x2_t = simd_cast(b); - simd_add(a, b) - } -} - -/// Add returning High Narrow. 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(addhn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { - unsafe { simd_cast(simd_shr(simd_add(a, b), int16x8_t::splat(8))) } -} - -/// Add returning High Narrow. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(addhn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { - unsafe { simd_cast(simd_shr(simd_add(a, b), int32x4_t::splat(16))) } -} - -/// Add returning High Narrow. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(addhn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { - unsafe { simd_cast(simd_shr(simd_add(a, b), int64x2_t::splat(32))) } -} - -/// Add returning High Narrow. 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(addhn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { - unsafe { simd_cast(simd_shr(simd_add(a, b), uint16x8_t::splat(8))) } -} - -/// Add returning High Narrow. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(addhn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { - unsafe { simd_cast(simd_shr(simd_add(a, b), uint32x4_t::splat(16))) } -} - -/// Add returning High Narrow. 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(addhn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { - unsafe { simd_cast(simd_shr(simd_add(a, b), uint64x2_t::splat(32))) } -} - -/// Add returning High Narrow (high half). -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(addhn2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddhn_high_s16(r: int8x8_t, a: int16x8_t, b: int16x8_t) -> int8x16_t { - unsafe { - let x = simd_cast(simd_shr(simd_add(a, b), int16x8_t::splat(8))); - simd_shuffle!(r, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) - } -} - -/// Add returning High Narrow (high half). 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(addhn2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddhn_high_s32(r: int16x4_t, a: int32x4_t, b: int32x4_t) -> int16x8_t { - unsafe { - let x = simd_cast(simd_shr(simd_add(a, b), int32x4_t::splat(16))); - simd_shuffle!(r, x, [0, 1, 2, 3, 4, 5, 6, 7]) - } -} - -/// Add returning High Narrow (high half). -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(addhn2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddhn_high_s64(r: int32x2_t, a: int64x2_t, b: int64x2_t) -> int32x4_t { - unsafe { - let x = simd_cast(simd_shr(simd_add(a, b), int64x2_t::splat(32))); - simd_shuffle!(r, x, [0, 1, 2, 3]) - } -} - -/// Add returning High Narrow (high half). 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(addhn2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddhn_high_u16(r: uint8x8_t, a: uint16x8_t, b: uint16x8_t) -> uint8x16_t { - unsafe { - let x = simd_cast(simd_shr(simd_add(a, b), uint16x8_t::splat(8))); - simd_shuffle!(r, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) - } -} - -/// Add returning High Narrow (high half). -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(addhn2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddhn_high_u32(r: uint16x4_t, a: uint32x4_t, b: uint32x4_t) -> uint16x8_t { - unsafe { - let x = simd_cast(simd_shr(simd_add(a, b), uint32x4_t::splat(16))); - simd_shuffle!(r, x, [0, 1, 2, 3, 4, 5, 6, 7]) - } -} - -/// Add returning High Narrow (high half). 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(addhn2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vaddhn_high_u64(r: uint32x2_t, a: uint64x2_t, b: uint64x2_t) -> uint32x4_t { - unsafe { - let x = simd_cast(simd_shr(simd_add(a, b), uint64x2_t::splat(32))); - simd_shuffle!(r, x, [0, 1, 2, 3]) - } -} - -/// Vector narrow integer. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(xtn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmovn_s16(a: int16x8_t) -> int8x8_t { - unsafe { simd_cast(a) } -} - -/// Vector narrow integer. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(xtn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmovn_s32(a: int32x4_t) -> int16x4_t { - unsafe { simd_cast(a) } -} - -/// Vector narrow integer. 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(xtn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmovn_s64(a: int64x2_t) -> int32x2_t { - unsafe { simd_cast(a) } -} - -/// Vector narrow integer. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(xtn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmovn_u16(a: uint16x8_t) -> uint8x8_t { - unsafe { simd_cast(a) } -} - -/// Vector narrow integer. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(xtn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmovn_u32(a: uint32x4_t) -> uint16x4_t { - unsafe { simd_cast(a) } -} - -/// Vector narrow integer. 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(xtn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmovn_u64(a: uint64x2_t) -> uint32x2_t { - unsafe { simd_cast(a) } -} - -/// Vector long move. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sxtl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmovl_s8(a: int8x8_t) -> int16x8_t { - unsafe { simd_cast(a) } -} - -/// Vector long move. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sxtl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmovl_s16(a: int16x4_t) -> int32x4_t { - unsafe { simd_cast(a) } -} - -/// Vector long move. 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sxtl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmovl_s32(a: int32x2_t) -> int64x2_t { - unsafe { simd_cast(a) } -} - -/// Vector long move. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uxtl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmovl_u8(a: uint8x8_t) -> uint16x8_t { - unsafe { simd_cast(a) } -} - -/// Vector long move. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uxtl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmovl_u16(a: uint16x4_t) -> uint32x4_t { - unsafe { simd_cast(a) } -} - -/// Vector long move. 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uxtl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmovl_u32(a: uint32x2_t) -> uint64x2_t { - unsafe { simd_cast(a) } -} - -/// Vector bitwise not. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmvn_s8(a: int8x8_t) -> int8x8_t { - let b = int8x8_t::splat(-1); - unsafe { simd_xor(a, b) } -} - -/// Vector bitwise not. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmvnq_s8(a: int8x16_t) -> int8x16_t { - let b = int8x16_t::splat(-1); - unsafe { simd_xor(a, b) } -} - -/// Vector bitwise not. 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmvn_s16(a: int16x4_t) -> int16x4_t { - let b = int16x4_t::splat(-1); - unsafe { simd_xor(a, b) } -} - -/// Vector bitwise not. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmvnq_s16(a: int16x8_t) -> int16x8_t { - let b = int16x8_t::splat(-1); - unsafe { simd_xor(a, b) } -} - -/// Vector bitwise not. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmvn_s32(a: int32x2_t) -> int32x2_t { - let b = int32x2_t::splat(-1); - unsafe { simd_xor(a, b) } -} - -/// Vector bitwise not. 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmvnq_s32(a: int32x4_t) -> int32x4_t { - let b = int32x4_t::splat(-1); - unsafe { simd_xor(a, b) } -} - -/// Vector bitwise not. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmvn_u8(a: uint8x8_t) -> uint8x8_t { - let b = uint8x8_t::splat(255); - unsafe { simd_xor(a, b) } -} - -/// Vector bitwise not. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmvnq_u8(a: uint8x16_t) -> uint8x16_t { - let b = uint8x16_t::splat(255); - unsafe { simd_xor(a, b) } -} - -/// Vector bitwise not. 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmvn_u16(a: uint16x4_t) -> uint16x4_t { - let b = uint16x4_t::splat(65_535); - unsafe { simd_xor(a, b) } -} - -/// Vector bitwise not. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmvnq_u16(a: uint16x8_t) -> uint16x8_t { - let b = uint16x8_t::splat(65_535); - unsafe { simd_xor(a, b) } -} - -/// Vector bitwise not. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmvn_u32(a: uint32x2_t) -> uint32x2_t { - let b = uint32x2_t::splat(4_294_967_295); - unsafe { simd_xor(a, b) } -} - -/// Vector bitwise not. 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmvnq_u32(a: uint32x4_t) -> uint32x4_t { - let b = uint32x4_t::splat(4_294_967_295); - unsafe { simd_xor(a, b) } -} - -/// Vector bitwise not. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmvn_p8(a: poly8x8_t) -> poly8x8_t { - let b = poly8x8_t::splat(255); - unsafe { simd_xor(a, b) } -} - -/// Vector bitwise not. 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmvnq_p8(a: poly8x16_t) -> poly8x16_t { - let b = poly8x16_t::splat(255); - unsafe { simd_xor(a, b) } -} - -/// Vector bitwise bit clear -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bic) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vbic_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - let c = int8x8_t::splat(-1); - unsafe { simd_and(simd_xor(b, c), a) } -} - -/// Vector bitwise bit clear -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bic) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vbicq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - let c = int8x16_t::splat(-1); - unsafe { simd_and(simd_xor(b, c), a) } -} - -/// Vector bitwise bit clear -#[inline] -#[target_feature(enable = 
"neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bic) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vbic_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - let c = int16x4_t::splat(-1); - unsafe { simd_and(simd_xor(b, c), a) } -} - -/// Vector bitwise bit clear -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bic) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vbicq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - let c = int16x8_t::splat(-1); - unsafe { simd_and(simd_xor(b, c), a) } -} - -/// Vector bitwise bit clear -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bic) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vbic_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - let c = int32x2_t::splat(-1); - unsafe { simd_and(simd_xor(b, c), a) } -} - -/// Vector bitwise bit clear -#[inline] -#[target_feature(enable = "neon")] 
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bic) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vbicq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - let c = int32x4_t::splat(-1); - unsafe { simd_and(simd_xor(b, c), a) } -} - -/// Vector bitwise bit clear -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bic) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vbic_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - let c = int64x1_t::splat(-1); - unsafe { simd_and(simd_xor(b, c), a) } -} - -/// Vector bitwise bit clear -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bic) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vbicq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - let c = int64x2_t::splat(-1); - unsafe { simd_and(simd_xor(b, c), a) } -} - -/// Vector bitwise bit clear -#[inline] -#[target_feature(enable = "neon")] 
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bic) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vbic_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - let c = int8x8_t::splat(-1); - unsafe { simd_and(simd_xor(b, transmute(c)), a) } -} - -/// Vector bitwise bit clear -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bic) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vbicq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - let c = int8x16_t::splat(-1); - unsafe { simd_and(simd_xor(b, transmute(c)), a) } -} - -/// Vector bitwise bit clear -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bic) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vbic_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - let c = int16x4_t::splat(-1); - unsafe { simd_and(simd_xor(b, transmute(c)), a) } -} - -/// Vector bitwise bit clear -#[inline] 
-#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bic) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vbicq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - let c = int16x8_t::splat(-1); - unsafe { simd_and(simd_xor(b, transmute(c)), a) } -} - -/// Vector bitwise bit clear -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bic) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vbic_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - let c = int32x2_t::splat(-1); - unsafe { simd_and(simd_xor(b, transmute(c)), a) } -} - -/// Vector bitwise bit clear -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bic) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vbicq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - let c = int32x4_t::splat(-1); - unsafe { simd_and(simd_xor(b, transmute(c)), a) } -} - -/// Vector 
bitwise bit clear -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bic) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vbic_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - let c = int64x1_t::splat(-1); - unsafe { simd_and(simd_xor(b, transmute(c)), a) } -} - -/// Vector bitwise bit clear -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bic) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vbicq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - let c = int64x2_t::splat(-1); - unsafe { simd_and(simd_xor(b, transmute(c)), a) } -} - -/// Bitwise Select instructions. This instruction sets each bit in the destination SIMD&FP register -/// to the corresponding bit from the first source SIMD&FP register when the original -/// destination bit was 1, otherwise from the second source SIMD&FP register. - -/// Bitwise Select. 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vbsl_s8(a: uint8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { - let not = int8x8_t::splat(-1); - unsafe { - transmute(simd_or( - simd_and(a, transmute(b)), - simd_and(simd_xor(a, transmute(not)), transmute(c)), - )) - } -} - -/// Bitwise Select. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vbsl_s16(a: uint16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { - let not = int16x4_t::splat(-1); - unsafe { - transmute(simd_or( - simd_and(a, transmute(b)), - simd_and(simd_xor(a, transmute(not)), transmute(c)), - )) - } -} - -/// Bitwise Select. 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vbsl_s32(a: uint32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { - let not = int32x2_t::splat(-1); - unsafe { - transmute(simd_or( - simd_and(a, transmute(b)), - simd_and(simd_xor(a, transmute(not)), transmute(c)), - )) - } -} - -/// Bitwise Select. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vbsl_s64(a: uint64x1_t, b: int64x1_t, c: int64x1_t) -> int64x1_t { - let not = int64x1_t::splat(-1); - unsafe { - transmute(simd_or( - simd_and(a, transmute(b)), - simd_and(simd_xor(a, transmute(not)), transmute(c)), - )) - } -} - -/// Bitwise Select. 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vbsl_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { - let not = int8x8_t::splat(-1); - unsafe { simd_or(simd_and(a, b), simd_and(simd_xor(a, transmute(not)), c)) } -} - -/// Bitwise Select. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vbsl_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { - let not = int16x4_t::splat(-1); - unsafe { simd_or(simd_and(a, b), simd_and(simd_xor(a, transmute(not)), c)) } -} - -/// Bitwise Select. 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vbsl_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { - let not = int32x2_t::splat(-1); - unsafe { simd_or(simd_and(a, b), simd_and(simd_xor(a, transmute(not)), c)) } -} - -/// Bitwise Select. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vbsl_u64(a: uint64x1_t, b: uint64x1_t, c: uint64x1_t) -> uint64x1_t { - let not = int64x1_t::splat(-1); - unsafe { simd_or(simd_and(a, b), simd_and(simd_xor(a, transmute(not)), c)) } -} - -/// Bitwise Select. 
-#[inline] -#[target_feature(enable = "neon,fp16")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) -)] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vbsl_f16(a: uint16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t { - let not = int16x4_t::splat(-1); - unsafe { - transmute(simd_or( - simd_and(a, transmute(b)), - simd_and(simd_xor(a, transmute(not)), transmute(c)), - )) - } -} - -/// Bitwise Select. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vbsl_f32(a: uint32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - let not = int32x2_t::splat(-1); - unsafe { - transmute(simd_or( - simd_and(a, transmute(b)), - simd_and(simd_xor(a, transmute(not)), transmute(c)), - )) - } -} - -/// Bitwise Select. 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vbsl_p8(a: uint8x8_t, b: poly8x8_t, c: poly8x8_t) -> poly8x8_t { - let not = int8x8_t::splat(-1); - unsafe { - transmute(simd_or( - simd_and(a, transmute(b)), - simd_and(simd_xor(a, transmute(not)), transmute(c)), - )) - } -} - -/// Bitwise Select. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vbsl_p16(a: uint16x4_t, b: poly16x4_t, c: poly16x4_t) -> poly16x4_t { - let not = int16x4_t::splat(-1); - unsafe { - transmute(simd_or( - simd_and(a, transmute(b)), - simd_and(simd_xor(a, transmute(not)), transmute(c)), - )) - } -} - -/// Bitwise Select. 
(128-bit) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vbslq_s8(a: uint8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { - let not = int8x16_t::splat(-1); - unsafe { - transmute(simd_or( - simd_and(a, transmute(b)), - simd_and(simd_xor(a, transmute(not)), transmute(c)), - )) - } -} - -/// Bitwise Select. (128-bit) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vbslq_s16(a: uint16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - let not = int16x8_t::splat(-1); - unsafe { - transmute(simd_or( - simd_and(a, transmute(b)), - simd_and(simd_xor(a, transmute(not)), transmute(c)), - )) - } -} - -/// Bitwise Select. 
(128-bit) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vbslq_s32(a: uint32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - let not = int32x4_t::splat(-1); - unsafe { - transmute(simd_or( - simd_and(a, transmute(b)), - simd_and(simd_xor(a, transmute(not)), transmute(c)), - )) - } -} - -/// Bitwise Select. (128-bit) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vbslq_s64(a: uint64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { - let not = int64x2_t::splat(-1); - unsafe { - transmute(simd_or( - simd_and(a, transmute(b)), - simd_and(simd_xor(a, transmute(not)), transmute(c)), - )) - } -} - -/// Bitwise Select. 
(128-bit) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vbslq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { - let not = int8x16_t::splat(-1); - unsafe { simd_or(simd_and(a, b), simd_and(simd_xor(a, transmute(not)), c)) } -} - -/// Bitwise Select. (128-bit) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vbslq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { - let not = int16x8_t::splat(-1); - unsafe { simd_or(simd_and(a, b), simd_and(simd_xor(a, transmute(not)), c)) } -} - -/// Bitwise Select. 
(128-bit) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vbslq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - let not = int32x4_t::splat(-1); - unsafe { simd_or(simd_and(a, b), simd_and(simd_xor(a, transmute(not)), c)) } -} - -/// Bitwise Select. (128-bit) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vbslq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { - let not = int64x2_t::splat(-1); - unsafe { simd_or(simd_and(a, b), simd_and(simd_xor(a, transmute(not)), c)) } -} - -/// Bitwise Select. 
(128-bit) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vbslq_p8(a: uint8x16_t, b: poly8x16_t, c: poly8x16_t) -> poly8x16_t { - let not = int8x16_t::splat(-1); - unsafe { - transmute(simd_or( - simd_and(a, transmute(b)), - simd_and(simd_xor(a, transmute(not)), transmute(c)), - )) - } -} - -/// Bitwise Select. (128-bit) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vbslq_p16(a: uint16x8_t, b: poly16x8_t, c: poly16x8_t) -> poly16x8_t { - let not = int16x8_t::splat(-1); - unsafe { - transmute(simd_or( - simd_and(a, transmute(b)), - simd_and(simd_xor(a, transmute(not)), transmute(c)), - )) - } -} - -/// Bitwise Select. 
-#[inline] -#[target_feature(enable = "neon,fp16")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) -)] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vbslq_f16(a: uint16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t { - let not = int16x8_t::splat(-1); - unsafe { - transmute(simd_or( - simd_and(a, transmute(b)), - simd_and(simd_xor(a, transmute(not)), transmute(c)), - )) - } -} - -/// Bitwise Select. (128-bit) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vbslq_f32(a: uint32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - let not = int32x4_t::splat(-1); - unsafe { - transmute(simd_or( - simd_and(a, transmute(b)), - simd_and(simd_xor(a, transmute(not)), transmute(c)), - )) - } -} - -/// Vector bitwise inclusive OR NOT -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vorn_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - let c = int8x8_t::splat(-1); - unsafe { 
simd_or(simd_xor(b, c), a) } -} - -/// Vector bitwise inclusive OR NOT -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vornq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - let c = int8x16_t::splat(-1); - unsafe { simd_or(simd_xor(b, c), a) } -} - -/// Vector bitwise inclusive OR NOT -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vorn_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - let c = int16x4_t::splat(-1); - unsafe { simd_or(simd_xor(b, c), a) } -} - -/// Vector bitwise inclusive OR NOT -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vornq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - let c = int16x8_t::splat(-1); - unsafe 
{ simd_or(simd_xor(b, c), a) } -} - -/// Vector bitwise inclusive OR NOT -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vorn_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - let c = int32x2_t::splat(-1); - unsafe { simd_or(simd_xor(b, c), a) } -} - -/// Vector bitwise inclusive OR NOT -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vornq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - let c = int32x4_t::splat(-1); - unsafe { simd_or(simd_xor(b, c), a) } -} - -/// Vector bitwise inclusive OR NOT -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vorn_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - let c = int64x1_t::splat(-1); - 
unsafe { simd_or(simd_xor(b, c), a) } -} - -/// Vector bitwise inclusive OR NOT -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vornq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - let c = int64x2_t::splat(-1); - unsafe { simd_or(simd_xor(b, c), a) } -} - -/// Vector bitwise inclusive OR NOT -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vorn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - let c = int8x8_t::splat(-1); - unsafe { simd_or(simd_xor(b, transmute(c)), a) } -} - -/// Vector bitwise inclusive OR NOT -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vornq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - let c = 
int8x16_t::splat(-1); - unsafe { simd_or(simd_xor(b, transmute(c)), a) } -} - -/// Vector bitwise inclusive OR NOT -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vorn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - let c = int16x4_t::splat(-1); - unsafe { simd_or(simd_xor(b, transmute(c)), a) } -} - -/// Vector bitwise inclusive OR NOT -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vornq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - let c = int16x8_t::splat(-1); - unsafe { simd_or(simd_xor(b, transmute(c)), a) } -} - -/// Vector bitwise inclusive OR NOT -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vorn_u32(a: 
uint32x2_t, b: uint32x2_t) -> uint32x2_t { - let c = int32x2_t::splat(-1); - unsafe { simd_or(simd_xor(b, transmute(c)), a) } -} - -/// Vector bitwise inclusive OR NOT -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vornq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - let c = int32x4_t::splat(-1); - unsafe { simd_or(simd_xor(b, transmute(c)), a) } -} - -/// Vector bitwise inclusive OR NOT -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vorn_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - let c = int64x1_t::splat(-1); - unsafe { simd_or(simd_xor(b, transmute(c)), a) } -} - -/// Vector bitwise inclusive OR NOT -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vornq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - let c = int64x2_t::splat(-1); - unsafe { simd_or(simd_xor(b, transmute(c)), a) } -} - -/// Move vector element to general-purpose register -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vgetq_lane_u64(v: uint64x2_t) -> u64 { - static_assert_uimm_bits!(IMM5, 1); - unsafe { simd_extract!(v, IMM5 as u32) } -} - -/// Move vector element to general-purpose register -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 0))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vget_lane_u64(v: uint64x1_t) -> u64 { - static_assert!(IMM5 == 0); - unsafe { simd_extract!(v, 0) } -} - -/// Move vector element to general-purpose register -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vget_lane_u16(v: uint16x4_t) -> u16 { - static_assert_uimm_bits!(IMM5, 2); - unsafe { simd_extract!(v, IMM5 as u32) } -} - -/// Move vector element to 
general-purpose register -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vget_lane_s16(v: int16x4_t) -> i16 { - static_assert_uimm_bits!(IMM5, 2); - unsafe { simd_extract!(v, IMM5 as u32) } -} - -/// Move vector element to general-purpose register -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vget_lane_p16(v: poly16x4_t) -> p16 { - static_assert_uimm_bits!(IMM5, 2); - unsafe { simd_extract!(v, IMM5 as u32) } -} - -/// Move vector element to general-purpose register -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vget_lane_u32(v: uint32x2_t) -> u32 { - static_assert_uimm_bits!(IMM5, 1); - unsafe { simd_extract!(v, IMM5 as u32) } -} - -/// Move vector element to general-purpose register -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] 
-#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vget_lane_s32(v: int32x2_t) -> i32 { - static_assert_uimm_bits!(IMM5, 1); - unsafe { simd_extract!(v, IMM5 as u32) } -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vget_lane_f32(v: float32x2_t) -> f32 { - static_assert_uimm_bits!(IMM5, 1); - unsafe { simd_extract!(v, IMM5 as u32) } -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vgetq_lane_f32(v: float32x4_t) -> f32 { - static_assert_uimm_bits!(IMM5, 2); - unsafe { simd_extract!(v, IMM5 as u32) } -} - -/// Move vector element to general-purpose register -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 0))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn 
vget_lane_p64(v: poly64x1_t) -> p64 { - static_assert!(IMM5 == 0); - unsafe { simd_extract!(v, IMM5 as u32) } -} - -/// Move vector element to general-purpose register -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 0))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vgetq_lane_p64(v: poly64x2_t) -> p64 { - static_assert_uimm_bits!(IMM5, 1); - unsafe { simd_extract!(v, IMM5 as u32) } -} - -/// Move vector element to general-purpose register -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 0))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vget_lane_s64(v: int64x1_t) -> i64 { - static_assert!(IMM5 == 0); - unsafe { simd_extract!(v, IMM5 as u32) } -} - -/// Move vector element to general-purpose register -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 0))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vgetq_lane_s64(v: int64x2_t) -> i64 { - static_assert_uimm_bits!(IMM5, 1); - unsafe { simd_extract!(v, IMM5 as u32) } -} - -/// Move vector element to general-purpose register -#[inline] -#[target_feature(enable = "neon")] 
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vgetq_lane_u16(v: uint16x8_t) -> u16 { - static_assert_uimm_bits!(IMM5, 3); - unsafe { simd_extract!(v, IMM5 as u32) } -} - -/// Move vector element to general-purpose register -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vgetq_lane_u32(v: uint32x4_t) -> u32 { - static_assert_uimm_bits!(IMM5, 2); - unsafe { simd_extract!(v, IMM5 as u32) } -} - -/// Move vector element to general-purpose register -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vgetq_lane_s16(v: int16x8_t) -> i16 { - static_assert_uimm_bits!(IMM5, 3); - unsafe { simd_extract!(v, IMM5 as u32) } -} - -/// Move vector element to general-purpose register -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", 
since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vgetq_lane_p16(v: poly16x8_t) -> p16 { - static_assert_uimm_bits!(IMM5, 3); - unsafe { simd_extract!(v, IMM5 as u32) } -} - -/// Move vector element to general-purpose register -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vgetq_lane_s32(v: int32x4_t) -> i32 { - static_assert_uimm_bits!(IMM5, 2); - unsafe { simd_extract!(v, IMM5 as u32) } -} - -/// Move vector element to general-purpose register -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vget_lane_u8(v: uint8x8_t) -> u8 { - static_assert_uimm_bits!(IMM5, 3); - unsafe { simd_extract!(v, IMM5 as u32) } -} - -/// Move vector element to general-purpose register -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vget_lane_s8(v: int8x8_t) -> i8 { - static_assert_uimm_bits!(IMM5, 3); - unsafe { 
simd_extract!(v, IMM5 as u32) } -} - -/// Move vector element to general-purpose register -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vget_lane_p8(v: poly8x8_t) -> p8 { - static_assert_uimm_bits!(IMM5, 3); - unsafe { simd_extract!(v, IMM5 as u32) } -} - -/// Move vector element to general-purpose register -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vgetq_lane_u8(v: uint8x16_t) -> u8 { - static_assert_uimm_bits!(IMM5, 4); - unsafe { simd_extract!(v, IMM5 as u32) } -} - -/// Move vector element to general-purpose register -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vgetq_lane_s8(v: int8x16_t) -> i8 { - static_assert_uimm_bits!(IMM5, 4); - unsafe { simd_extract!(v, IMM5 as u32) } -} - -/// Move vector element to general-purpose register -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
-#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vgetq_lane_p8(v: poly8x16_t) -> p8 { - static_assert_uimm_bits!(IMM5, 4); - unsafe { simd_extract!(v, IMM5 as u32) } -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vget_high_s8(a: int8x16_t) -> int8x8_t { - unsafe { simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]) } -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vget_high_s16(a: int16x8_t) -> int16x4_t { - unsafe { simd_shuffle!(a, a, [4, 5, 6, 7]) } -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] -#[cfg_attr( - all(test, 
any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vget_high_s32(a: int32x4_t) -> int32x2_t { - unsafe { simd_shuffle!(a, a, [2, 3]) } -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vget_high_s64(a: int64x2_t) -> int64x1_t { - unsafe { int64x1_t([simd_extract!(a, 1)]) } -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vget_high_u8(a: uint8x16_t) -> uint8x8_t { - unsafe { simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]) } -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = 
"arm64ec")), - assert_instr(ext) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vget_high_u16(a: uint16x8_t) -> uint16x4_t { - unsafe { simd_shuffle!(a, a, [4, 5, 6, 7]) } -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vget_high_u32(a: uint32x4_t) -> uint32x2_t { - unsafe { simd_shuffle!(a, a, [2, 3]) } -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vget_high_u64(a: uint64x2_t) -> uint64x1_t { - unsafe { uint64x1_t([simd_extract!(a, 1)]) } -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext) -)] -#[cfg_attr( - 
not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vget_high_p8(a: poly8x16_t) -> poly8x8_t { - unsafe { simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]) } -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vget_high_p16(a: poly16x8_t) -> poly16x4_t { - unsafe { simd_shuffle!(a, a, [4, 5, 6, 7]) } -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vget_high_f32(a: float32x4_t) -> float32x2_t { - unsafe { simd_shuffle!(a, a, [2, 3]) } -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "vget_low_s8", since = "1.60.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", 
issue = "111800") -)] -pub fn vget_low_s8(a: int8x16_t) -> int8x8_t { - unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) } -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vget_low_s16(a: int16x8_t) -> int16x4_t { - unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) } -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vget_low_s32(a: int32x4_t) -> int32x2_t { - unsafe { simd_shuffle!(a, a, [0, 1]) } -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vget_low_s64(a: int64x2_t) -> int64x1_t { - unsafe { int64x1_t([simd_extract!(a, 0)]) } -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = 
"arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vget_low_u8(a: uint8x16_t) -> uint8x8_t { - unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) } -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vget_low_u16(a: uint16x8_t) -> uint16x4_t { - unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) } -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vget_low_u32(a: uint32x4_t) -> uint32x2_t { - unsafe { simd_shuffle!(a, a, [0, 1]) } -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vget_low_u64(a: uint64x2_t) -> uint64x1_t { - unsafe { uint64x1_t([simd_extract!(a, 0)]) } -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = 
"neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vget_low_p8(a: poly8x16_t) -> poly8x8_t { - unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) } -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vget_low_p16(a: poly16x8_t) -> poly16x4_t { - unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) } -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vget_low_f32(a: float32x4_t) -> float32x2_t { - unsafe { simd_shuffle!(a, a, [0, 1]) } -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vdupq_n_s8(value: i8) -> int8x16_t { - int8x16_t::splat(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable 
= "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vdupq_n_s16(value: i16) -> int16x8_t { - int16x8_t::splat(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vdupq_n_s32(value: i32) -> int32x4_t { - int32x4_t::splat(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vdupq_n_s64(value: i64) -> int64x2_t { - int64x2_t::splat(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vdup.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vdupq_n_u8(value: u8) -> uint8x16_t { - uint8x16_t::splat(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vdupq_n_u16(value: u16) -> uint16x8_t { - uint16x8_t::splat(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vdupq_n_u32(value: u32) -> uint32x4_t { - uint32x4_t::splat(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - 
assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vdupq_n_u64(value: u64) -> uint64x2_t { - uint64x2_t::splat(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vdupq_n_p8(value: p8) -> poly8x16_t { - poly8x16_t::splat(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vdupq_n_p16(value: p16) -> poly16x8_t { - poly16x8_t::splat(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") 
-)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vdupq_n_f32(value: f32) -> float32x4_t { - float32x4_t::splat(value) -} - -/// Duplicate vector element to vector or scalar -/// -/// Private vfp4 version used by FMA intriniscs because LLVM does -/// not inline the non-vfp4 version in vfp4 functions. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -fn vdupq_n_f32_vfp4(value: f32) -> float32x4_t { - float32x4_t::splat(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vdup_n_s8(value: i8) -> int8x8_t { - int8x8_t::splat(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue 
= "111800") -)] -pub fn vdup_n_s16(value: i16) -> int16x4_t { - int16x4_t::splat(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vdup_n_s32(value: i32) -> int32x2_t { - int32x2_t::splat(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmov) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vdup_n_s64(value: i64) -> int64x1_t { - int64x1_t::splat(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vdup_n_u8(value: u8) -> uint8x8_t { - uint8x8_t::splat(value) -} - -/// Duplicate vector element to 
vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vdup_n_u16(value: u16) -> uint16x4_t { - uint16x4_t::splat(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vdup_n_u32(value: u32) -> uint32x2_t { - uint32x2_t::splat(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmov) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vdup_n_u64(value: u64) -> uint64x1_t { - uint64x1_t::splat(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = 
"v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vdup_n_p8(value: p8) -> poly8x8_t { - poly8x8_t::splat(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vdup_n_p16(value: p16) -> poly16x4_t { - poly16x4_t::splat(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vdup_n_f32(value: f32) -> float32x2_t { - float32x2_t::splat(value) -} - -/// Duplicate vector element to vector or scalar -/// -/// Private vfp4 version used by FMA intriniscs because LLVM does -/// not inline the non-vfp4 version in vfp4 functions. 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -fn vdup_n_f32_vfp4(value: f32) -> float32x2_t { - float32x2_t::splat(value) -} - -/// Load SIMD&FP register (immediate offset) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vldrq_p128(a: *const p128) -> p128 { - *a -} - -/// Store SIMD&FP register (immediate offset) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vstrq_p128(a: *mut p128, b: p128) { - *a = b; -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - 
assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmov_n_s8(value: i8) -> int8x8_t { - vdup_n_s8(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmov_n_s16(value: i16) -> int16x4_t { - vdup_n_s16(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmov_n_s32(value: i32) -> int32x2_t { - vdup_n_s32(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmov) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - 
target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmov_n_s64(value: i64) -> int64x1_t { - vdup_n_s64(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmov_n_u8(value: u8) -> uint8x8_t { - vdup_n_u8(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmov_n_u16(value: u16) -> uint16x4_t { - vdup_n_u16(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmov_n_u32(value: u32) -> uint32x2_t { - 
vdup_n_u32(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmov) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmov_n_u64(value: u64) -> uint64x1_t { - vdup_n_u64(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmov_n_p8(value: p8) -> poly8x8_t { - vdup_n_p8(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmov_n_p16(value: p16) -> poly16x4_t { - vdup_n_p16(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = 
"arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmov_n_f32(value: f32) -> float32x2_t { - vdup_n_f32(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmovq_n_s8(value: i8) -> int8x16_t { - vdupq_n_s8(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmovq_n_s16(value: i16) -> int16x8_t { - vdupq_n_s16(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] -#[cfg_attr( - all(test, 
any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmovq_n_s32(value: i32) -> int32x4_t { - vdupq_n_s32(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmovq_n_s64(value: i64) -> int64x2_t { - vdupq_n_s64(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmovq_n_u8(value: u8) -> uint8x16_t { - vdupq_n_u8(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = 
"neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmovq_n_u16(value: u16) -> uint16x8_t { - vdupq_n_u16(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmovq_n_u32(value: u32) -> uint32x4_t { - vdupq_n_u32(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmovq_n_u64(value: u64) -> uint64x2_t { - vdupq_n_u64(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = 
"111800") -)] -pub fn vmovq_n_p8(value: p8) -> poly8x16_t { - vdupq_n_p8(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmovq_n_p16(value: p16) -> poly16x8_t { - vdupq_n_p16(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmovq_n_f32(value: f32) -> float32x4_t { - vdupq_n_f32(value) -} - -/// Extract vector from pair of vectors -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("nop", N = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr("nop", N = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vext_s64(a: int64x1_t, _b: int64x1_t) -> int64x1_t { - static_assert!(N == 
0); - a -} - -/// Extract vector from pair of vectors -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("nop", N = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr("nop", N = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vext_u64(a: uint64x1_t, _b: uint64x1_t) -> uint64x1_t { - static_assert!(N == 0); - a -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev16) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vrev16_s8(a: int8x8_t) -> int8x8_t { - unsafe { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev16) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vrev16q_s8(a: int8x16_t) -> int8x16_t { - unsafe { simd_shuffle!(a, a, [1, 0, 3, 2, 
5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14]) } -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev16) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vrev16_u8(a: uint8x8_t) -> uint8x8_t { - unsafe { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev16) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vrev16q_u8(a: uint8x16_t) -> uint8x16_t { - unsafe { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14]) } -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev16) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vrev16_p8(a: poly8x8_t) -> poly8x8_t 
{ - unsafe { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev16) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vrev16q_p8(a: poly8x16_t) -> poly8x16_t { - unsafe { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14]) } -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev32) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vrev32_s8(a: int8x8_t) -> int8x8_t { - unsafe { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev32) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vrev32q_s8(a: 
int8x16_t) -> int8x16_t { - unsafe { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12]) } -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev32) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vrev32_u8(a: uint8x8_t) -> uint8x8_t { - unsafe { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev32) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vrev32q_u8(a: uint8x16_t) -> uint8x16_t { - unsafe { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12]) } -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev32) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vrev32_s16(a: int16x4_t) -> int16x4_t { - unsafe { simd_shuffle!(a, a, [1, 0, 3, 2]) } -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev32) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vrev32q_s16(a: int16x8_t) -> int16x8_t { - unsafe { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev32) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vrev32_p16(a: poly16x4_t) -> poly16x4_t { - unsafe { simd_shuffle!(a, a, [1, 0, 3, 2]) } -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev32) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", 
issue = "111800") -)] -pub fn vrev32q_p16(a: poly16x8_t) -> poly16x8_t { - unsafe { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev32) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vrev32_u16(a: uint16x4_t) -> uint16x4_t { - unsafe { simd_shuffle!(a, a, [1, 0, 3, 2]) } -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev32) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vrev32q_u16(a: uint16x8_t) -> uint16x8_t { - unsafe { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev32) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = 
"111800") -)] -pub fn vrev32_p8(a: poly8x8_t) -> poly8x8_t { - unsafe { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev32) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vrev32q_p8(a: poly8x16_t) -> poly8x16_t { - unsafe { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12]) } -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vrev64_s8(a: int8x8_t) -> int8x8_t { - unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) } -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vrev64q_s8(a: int8x16_t) -> int8x16_t { - unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8]) } -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vrev64_s16(a: int16x4_t) -> int16x4_t { - unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) } -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vrev64q_s16(a: int16x8_t) -> int16x8_t { - unsafe { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - 
unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vrev64_s32(a: int32x2_t) -> int32x2_t { - unsafe { simd_shuffle!(a, a, [1, 0]) } -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vrev64q_s32(a: int32x4_t) -> int32x4_t { - unsafe { simd_shuffle!(a, a, [1, 0, 3, 2]) } -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vrev64_u8(a: uint8x8_t) -> uint8x8_t { - unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) } -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vrev64q_u8(a: uint8x16_t) -> uint8x16_t { - unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8]) } -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vrev64_u16(a: uint16x4_t) -> uint16x4_t { - unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) } -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vrev64q_u16(a: uint16x8_t) -> uint16x8_t { - unsafe { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - 
unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vrev64_u32(a: uint32x2_t) -> uint32x2_t { - unsafe { simd_shuffle!(a, a, [1, 0]) } -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vrev64q_u32(a: uint32x4_t) -> uint32x4_t { - unsafe { simd_shuffle!(a, a, [1, 0, 3, 2]) } -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vrev64_f32(a: float32x2_t) -> float32x2_t { - unsafe { simd_shuffle!(a, a, [1, 0]) } -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", 
issue = "111800") -)] -pub fn vrev64q_f32(a: float32x4_t) -> float32x4_t { - unsafe { simd_shuffle!(a, a, [1, 0, 3, 2]) } -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vrev64_p8(a: poly8x8_t) -> poly8x8_t { - unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) } -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vrev64q_p8(a: poly8x16_t) -> poly8x16_t { - unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8]) } -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vrev64_p16(a: poly16x4_t) -> poly16x4_t { - unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) } -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vrev64q_p16(a: poly16x8_t) -> poly16x8_t { - unsafe { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } -} - /* FIXME: 16-bit float /// Vector combine #[inline] diff --git a/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml b/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml index 41fdfc5382..6d6d199847 100644 --- a/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml +++ b/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml @@ -13,40 +13,10 @@ auto_llvm_sign_conversion: false neon-stable: &neon-stable FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] -# #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -neon-unstable: &neon-unstable - FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']] - -# #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -neon-v7: &neon-v7 - FnCall: [cfg_attr, ['target_arch = "arm"', { FnCall: [target_feature, [ 'enable = "v7"']]} ]] - -# #[target_feature(enable = "neon,v7")] -enable-v7: &enable-v7 - FnCall: [target_feature, ['enable = "neon,v7"']] - -# #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -neon-v8: &neon-v8 - FnCall: [cfg_attr, ['target_arch = "arm"', { FnCall: [target_feature, [ 'enable = "v8"']]} ]] - -target-is-arm: &target-is-arm - 
FnCall: [cfg, ['target_arch = "arm"']] - # #[cfg(not(target_arch = "arm"))] target-not-arm: &target-not-arm FnCall: [cfg, [{ FnCall: [not, ['target_arch = "arm"']]}]] -neon-target-aarch64-arm64ec: &neon-target-aarch64-arm64ec - FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]] - -# #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] -neon-stable-not-arm: &neon-stable-not-arm - FnCall: [cfg_attr, [{ FnCall: [not, ['target_arch = "arm"']]}, *neon-stable]] - -#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] -neon-unstable-is-arm: &neon-unstable-is-arm - FnCall: [ cfg_attr, ['target_arch = "arm"', *neon-unstable]] - # #[cfg_attr(all(test, not(target_env = "msvc"))] msvc-disabled: &msvc-disabled FnCall: [all, [test, {FnCall: [not, ['target_env = "msvc"']]}]] @@ -74,10 +44,6 @@ enable-fhm: &enable-fhm enable-fcma: &enable-fcma FnCall: [cfg_attr, [{ FnCall: [not, ['target_arch = "arm"']]}, { FnCall: [target_feature, ['enable = "fcma"']] }]] -#[cfg_attr(not(target_arch = "arm"), unstable(feature = "stdarch_neon_i8mm", issue = "117223"))] -neon-unstable-i8mm: &neon-unstable-i8mm - FnCall: [cfg_attr, [{ FnCall: [not, ['target_arch = "arm"']] }, { FnCall: [unstable, ['feature = "stdarch_neon_i8mm"', 'issue = "117223"']] } ]] - # #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] neon-unstable-fcma: &neon-unstable-fcma FnCall: [unstable, ['feature = "stdarch_neon_fcma"', 'issue = "117222"']] diff --git a/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml b/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml index b9be94642d..a3b756d557 100644 --- a/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml +++ b/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml @@ -10,9 +10,13 @@ auto_big_endian: true neon-stable: &neon-stable FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] +# #[cfg_attr(target_arch = "arm", 
unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] +neon-cfg-arm-unstable: &neon-cfg-arm-unstable + FnCall: ['cfg_attr', ['target_arch = "arm"', {FnCall: ['unstable', ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + # #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -neon-unstable: &neon-unstable - FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']] +neon-arm-unstable: &neon-arm-unstable + FnCall: ['unstable', ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']] # #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] neon-v7: &neon-v7 @@ -40,12 +44,8 @@ neon-target-aarch64-arm64ec: &neon-target-aarch64-arm64ec FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]] # #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] -neon-stable-not-arm: &neon-stable-not-arm - FnCall: [cfg_attr, [{ FnCall: [not, ['target_arch = "arm"']]}, *neon-stable]] - -#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] -neon-unstable-is-arm: &neon-unstable-is-arm - FnCall: [ cfg_attr, ['target_arch = "arm"', *neon-unstable]] +neon-not-arm-stable: &neon-not-arm-stable + FnCall: [cfg_attr, [{ FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] # #[cfg_attr(all(test, not(target_env = "msvc"))] msvc-disabled: &msvc-disabled @@ -97,8 +97,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vand]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [and]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - int8x8_t @@ -131,8 +131,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vorr]]}]] - FnCall: [cfg_attr, 
[*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [orr]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - int8x8_t @@ -165,8 +165,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [veor]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [eor]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - int8x8_t @@ -199,8 +199,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vabd.{neon_type}"']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sabd]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - int8x8_t @@ -226,8 +226,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vabd.{neon_type}"']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [uabd]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - uint8x8_t @@ -253,8 +253,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vabd.f32"']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fabd]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - float32x2_t @@ -296,12 +296,7 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: - - cfg_attr - - - 'target_arch = "arm"' - - FnCall: - - target_feature - - - 'enable = "v7"' + - *neon-v7 - FnCall: - cfg_attr - - FnCall: @@ -323,22 +318,8 @@ intrinsics: - FnCall: - assert_instr - - sabdl - - FnCall: - - cfg_attr - - - FnCall: - - not - - - 'target_arch = "arm"' 
- - FnCall: - - stable - - - 'feature = "neon_intrinsics"' - - 'since = "1.59.0"' - - FnCall: - - cfg_attr - - - 'target_arch = "arm"' - - FnCall: - - unstable - - - 'feature = "stdarch_arm_neon_intrinsics"' - - 'issue = "111800"' + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int8x8_t, int16x8_t, uint8x8_t] @@ -363,11 +344,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vceq{type[2]}"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [cmeq]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vceq{type[2]}"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [cmeq]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [uint8x8_t, uint8x8_t, ".i8"] @@ -392,11 +373,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vceq.f32"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [fcmeq]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = 
"neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vceq.f32"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fcmeq]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [float32x2_t, uint32x2_t] @@ -411,8 +392,8 @@ intrinsics: return_type: "{neon_type[1]}" attr: - *neon-v8 - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vceq.f16"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [fcmeq]]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vceq.f16"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fcmeq]]}]] - *neon-fp16 - *neon-unstable-f16 safety: safe @@ -427,11 +408,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vtst]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [cmtst]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vtst]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [cmtst]]}]] + - *neon-not-arm-stable + - 
*neon-cfg-arm-unstable safety: safe types: - [int8x8_t, uint8x8_t, i8x8, 'i8x8::new(0, 0, 0, 0, 0, 0, 0, 0)'] @@ -454,11 +435,11 @@ intrinsics: arguments: ["a: {neon_type}"] return_type: "{neon_type}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vabs]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [fabs]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vabs]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fabs]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - float32x2_t @@ -472,8 +453,8 @@ intrinsics: return_type: "{neon_type}" attr: - *neon-v8 - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vabs]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [fabs]]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vabs]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fabs]]}]] - *neon-fp16 - *neon-unstable-f16 safety: safe @@ -489,8 +470,8 @@ intrinsics: return_type: "{type[1]}" attr: - *neon-v8 - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vabs]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [fabs]]}]] + - FnCall: 
[cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vabs]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fabs]]}]] - *neon-fp16 - *neon-unstable-f16 safety: safe @@ -509,11 +490,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vcgt.{type[2]}"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [cmgt]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vcgt.{type[2]}"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [cmgt]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int8x8_t, uint8x8_t, "s8"] @@ -530,11 +511,11 @@ intrinsics: arguments: ["a: {neon_type}", "b: {neon_type}"] return_type: "{neon_type}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vcgt.{neon_type}"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [cmhi]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 
'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vcgt.{neon_type}"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [cmhi]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - uint8x8_t @@ -551,11 +532,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vcgt.f32"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [fcmgt]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vcgt.f32"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fcmgt]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [float32x2_t, uint32x2_t] @@ -570,8 +551,8 @@ intrinsics: return_type: "{neon_type[1]}" attr: - *neon-v8 - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vcgt.f16"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [fcmgt]]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vcgt.f16"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fcmgt]]}]] - *neon-fp16 - *neon-unstable-f16 safety: safe @@ -588,8 +569,8 @@ intrinsics: return_type: "{neon_type[1]}" attr: - *neon-v8 - 
- FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vcgt.f16"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [fcmgt]]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vcgt.f16"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fcmgt]]}]] - *neon-fp16 - *neon-unstable-f16 safety: safe @@ -605,11 +586,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vcgt.{neon_type[0]}"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [cmgt]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vcgt.{neon_type[0]}"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [cmgt]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int8x8_t, uint8x8_t] @@ -626,11 +607,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vcge.{neon_type[0]}"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch 
= "arm64ec"']]}]]}, {FnCall: [assert_instr, [cmge]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vcge.{neon_type[0]}"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [cmge]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int8x8_t, uint8x8_t] @@ -647,11 +628,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vcge.f32"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [fcmge]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vcge.f32"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fcmge]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [float32x2_t, uint32x2_t] @@ -666,8 +647,8 @@ intrinsics: return_type: "{neon_type[1]}" attr: - *neon-v8 - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vcge.f16"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: 
[assert_instr, [fcmge]]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vcge.f16"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fcmge]]}]] - *neon-fp16 - *neon-unstable-f16 safety: safe @@ -683,8 +664,8 @@ intrinsics: return_type: "{neon_type[1]}" attr: - *neon-v8 - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vcle.f16"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [fcmle]]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vcle.f16"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fcmle]]}]] - *neon-fp16 - *neon-unstable-f16 safety: safe @@ -703,11 +684,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vcge.{neon_type[0]}"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [cmge]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vcge.{neon_type[0]}"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [cmge]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int8x8_t, uint8x8_t] @@ -724,11 +705,11 @@ intrinsics: arguments: ["a: {neon_type}"] return_type: "{neon_type}" attr: - - FnCall: [cfg_attr, 
[target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vcls.{neon_type}"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [cls]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vcls.{neon_type}"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [cls]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - int8x8_t @@ -751,11 +732,11 @@ intrinsics: arguments: ["a: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vcls]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [cls]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vcls]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [cls]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [uint8x8_t, int8x8_t] @@ -774,11 +755,11 @@ intrinsics: arguments: ["a: {neon_type[0]}"] return_type: "{neon_type[0]}" 
attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vclz.i8"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [clz]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vclz.i8"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [clz]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [uint8x8_t, int8x8_t] @@ -795,11 +776,11 @@ intrinsics: arguments: ["a: {neon_type[0]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vclz{type[1]}"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [clz]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vclz{type[1]}"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [clz]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int8x8_t, '.i8'] @@ -822,11 +803,11 @@ intrinsics: arguments: ["a: 
{neon_type[0]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vclz{type[1]}"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [clz]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vclz{type[1]}"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [clz]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [uint32x2_t, '.i32', int32x2_t] @@ -845,11 +826,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vacgt.f32"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [facgt]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vacgt.f32"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [facgt]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable 
safety: safe types: - [float32x2_t, uint32x2_t] @@ -870,8 +851,8 @@ intrinsics: return_type: "{neon_type[1]}" attr: - *neon-v8 - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vacgt.f16"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [facgt]]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vacgt.f16"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [facgt]]}]] - *neon-fp16 - *neon-unstable-f16 safety: safe @@ -892,11 +873,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vacge.f32"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [facge]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vacge.f32"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [facge]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [float32x2_t, uint32x2_t] @@ -916,8 +897,8 @@ intrinsics: return_type: "{neon_type[1]}" attr: - *neon-v8 - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vacge.f16"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: 
[assert_instr, [facge]]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vacge.f16"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [facge]]}]] - *neon-fp16 - *neon-unstable-f16 safety: safe @@ -938,11 +919,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vacgt.f32"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [facgt]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vacgt.f32"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [facgt]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [float32x2_t, uint32x2_t] @@ -956,8 +937,8 @@ intrinsics: return_type: "{neon_type[1]}" attr: - *neon-v8 - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vacgt.f16"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [facgt]]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vacgt.f16"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [facgt]]}]] - *neon-fp16 - *neon-unstable-f16 safety: safe @@ -972,11 +953,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - 
FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vacge.f32"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [facge]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vacge.f32"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [facge]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [float32x2_t, uint32x2_t] @@ -991,8 +972,8 @@ intrinsics: return_type: "{neon_type[1]}" attr: - *neon-v8 - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vacge.f16"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [facge]]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vacge.f16"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [facge]]}]] - *neon-fp16 - *neon-unstable-f16 safety: safe @@ -1007,11 +988,11 @@ intrinsics: arguments: ["a: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vcvt]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [scvtf]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, 
['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vcvt]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [scvtf]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int32x2_t, float32x2_t] @@ -1025,8 +1006,8 @@ intrinsics: return_type: "{neon_type[1]}" attr: - *neon-v7 - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vcvt]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [scvtf]]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vcvt]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [scvtf]]}]] - *neon-fp16 - *neon-unstable-f16 safety: safe @@ -1041,11 +1022,11 @@ intrinsics: arguments: ["a: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vcvt]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [ucvtf]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vcvt]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ucvtf]]}]] + - 
*neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [uint32x2_t, float32x2_t] @@ -1059,8 +1040,8 @@ intrinsics: return_type: "{neon_type[1]}" attr: - *neon-v7 - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vcvt]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [ucvtf]]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vcvt]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ucvtf]]}]] - *neon-fp16 - *neon-unstable-f16 safety: safe @@ -1075,11 +1056,11 @@ intrinsics: arguments: ["a: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg, ['target_arch = "arm"']] - - FnCall: [target_feature, ['enable = "neon,v7"']] + - *target-is-arm + - *enable-v7 - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [vcvt, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']] + - *neon-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -1105,7 +1086,7 @@ intrinsics: - FnCall: [cfg, [{FnCall: [not, ['target_arch = "arm"']]}]] - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [ucvtf, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + - *neon-stable static_defs: ['const N: i32'] safety: safe types: @@ -1220,11 +1201,11 @@ intrinsics: arguments: ["a: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg, ['target_arch = "arm"']] - - FnCall: [target_feature, ['enable = "neon,v7"']] + - *target-is-arm + - *enable-v7 - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [vcvt, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']] + - *neon-arm-unstable static_defs: 
['const N: i32'] safety: safe types: @@ -1281,7 +1262,7 @@ intrinsics: - FnCall: [cfg, [{FnCall: [not, ['target_arch = "arm"']]}]] - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [scvtf, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + - *neon-stable static_defs: ['const N: i32'] safety: safe types: @@ -1305,10 +1286,10 @@ intrinsics: return_type: "{neon_type[1]}" attr: - *target-is-arm - - FnCall: [target_feature, ['enable = "neon,v7"']] + - *enable-v7 - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [vcvt, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']] + - *neon-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -1330,10 +1311,10 @@ intrinsics: return_type: "{neon_type[1]}" attr: - *target-is-arm - - FnCall: [target_feature, ['enable = "neon,v7"']] + - *enable-v7 - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [vcvt, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']] + - *neon-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -1357,7 +1338,7 @@ intrinsics: - FnCall: [cfg, [{FnCall: [not, ['target_arch = "arm"']]}]] - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcvtzs, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + - *neon-stable static_defs: ['const N: i32'] safety: safe types: @@ -1381,7 +1362,7 @@ intrinsics: - FnCall: [cfg, [{FnCall: [not, ['target_arch = "arm"']]}]] - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcvtzu, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + - *neon-stable static_defs: ['const N: i32'] safety: safe types: @@ -1404,12 +1385,12 
@@ intrinsics: arguments: ["a: {neon_type[1]}"] return_type: "{neon_type[2]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vdup.8"', 'N = 4']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [dup, 'N = 4']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vdup.8"', 'N = 4']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [dup, 'N = 4']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -1428,12 +1409,12 @@ intrinsics: arguments: ["a: {neon_type[1]}"] return_type: "{neon_type[2]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vdup.8"', 'N = 8']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [dup, 'N = 8']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vdup.8"', 'N = 8']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [dup, 'N = 8']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = 
"arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -1452,12 +1433,12 @@ intrinsics: arguments: ["a: {neon_type[1]}"] return_type: "{neon_type[2]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vdup.16"', 'N = 2']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [dup, 'N = 2']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vdup.16"', 'N = 2']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [dup, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -1476,12 +1457,12 @@ intrinsics: arguments: ["a: {neon_type[1]}"] return_type: "{neon_type[2]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vdup.16"', 'N = 4']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [dup, 'N = 4']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vdup.16"', 'N = 4']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [dup, 'N = 4']]}]] - 
FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -1502,8 +1483,8 @@ intrinsics: return_type: "{neon_type[2]}" attr: - *neon-v7 - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vdup.16"', 'N = 4']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [dup, 'N = 4']]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vdup.16"', 'N = 4']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [dup, 'N = 4']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - *neon-fp16 - *neon-unstable-f16 @@ -1522,8 +1503,8 @@ intrinsics: return_type: "{neon_type[0]}" attr: - *neon-v7 - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vdup.16"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [dup]]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vdup.16"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [dup]]}]] - *neon-fp16 - *neon-unstable-f16 safety: safe @@ -1539,8 +1520,8 @@ intrinsics: return_type: "{neon_type[2]}" attr: - *neon-v7 - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vdup.16"', 'N = 2']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [dup, 'N = 2']]}]] + - 
FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vdup.16"', 'N = 2']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [dup, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - *neon-fp16 - *neon-unstable-f16 @@ -1559,12 +1540,12 @@ intrinsics: arguments: ["a: {neon_type[1]}"] return_type: "{neon_type[2]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vdup.32"', 'N = 1']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [dup, 'N = 1']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vdup.32"', 'N = 1']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [dup, 'N = 1']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -1583,12 +1564,12 @@ intrinsics: arguments: ["a: {neon_type[1]}"] return_type: "{neon_type[2]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vdup.32"', 'N = 2']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [dup, 'N = 2']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vdup.32"', 'N = 2']]}]] + - FnCall: [cfg_attr, 
[*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [dup, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -1607,12 +1588,12 @@ intrinsics: arguments: ["a: {neon_type[1]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vmov, 'N = 1']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [dup, 'N = 1']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vmov, 'N = 1']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [dup, 'N = 1']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -1627,12 +1608,12 @@ intrinsics: arguments: ["a: {neon_type[1]}"] return_type: "{neon_type[2]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vmov, 'N = 0']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 
'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [dup, 'N = 0']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vmov, 'N = 0']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [dup, 'N = 0']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -1647,12 +1628,12 @@ intrinsics: arguments: ["a: {neon_type[1]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [nop, 'N = 0']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [nop, 'N = 0']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop, 'N = 0']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [nop, 'N = 0']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -1667,12 +1648,12 @@ intrinsics: arguments: ["a: {neon_type[1]}"] return_type: "{neon_type[2]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: 
[cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vmov, 'N = 1']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [nop, 'N = 1']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vmov, 'N = 1']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [nop, 'N = 1']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -1689,12 +1670,12 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vext.8"', 'N = 7']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [ext, 'N = 7']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vext.8"', 'N = 7']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ext, 'N = 7']]}]] - FnCall: [rustc_legacy_const_generics, ['2']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: i32'] 
safety: safe types: @@ -1713,12 +1694,12 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vext.8"', 'N = 15']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [ext, 'N = 15']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vext.8"', 'N = 15']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ext, 'N = 15']]}]] - FnCall: [rustc_legacy_const_generics, ['2']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -1734,12 +1715,12 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vext.8"', 'N = 3']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [ext, 'N = 3']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vext.8"', 'N = 3']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ext, 'N = 3']]}]] - FnCall: [rustc_legacy_const_generics, ['2']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = 
"neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -1760,8 +1741,8 @@ intrinsics: return_type: "{neon_type[0]}" attr: - *neon-v7 - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vext.8"', 'N = 3']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [ext, 'N = 3']]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vext.8"', 'N = 3']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ext, 'N = 3']]}]] - FnCall: [rustc_legacy_const_generics, ['2']] - *neon-fp16 - *neon-unstable-f16 @@ -1778,8 +1759,8 @@ intrinsics: return_type: "{neon_type[0]}" attr: - *neon-v7 - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vext.8"', 'N = 7']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [ext, 'N = 7']]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vext.8"', 'N = 7']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ext, 'N = 7']]}]] - FnCall: [rustc_legacy_const_generics, ['2']] - *neon-fp16 - *neon-unstable-f16 @@ -1797,12 +1778,12 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vext.8"', 'N = 1']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = 
"arm64ec"']]}]]}, {FnCall: [assert_instr, [ext, 'N = 1']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vext.8"', 'N = 1']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ext, 'N = 1']]}]] - FnCall: [rustc_legacy_const_generics, ['2']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -1818,12 +1799,12 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vmov, 'N = 1']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [ext, 'N = 1']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vmov, 'N = 1']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ext, 'N = 1']]}]] - FnCall: [rustc_legacy_const_generics, ['2']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -1838,11 +1819,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}", "c: {neon_type[0]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: 
[target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vmla{type[1]}"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [mla]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vmla{type[1]}"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [mla]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int8x8_t, ".i8"] @@ -1865,11 +1846,11 @@ intrinsics: arguments: ["a: {neon_type}", "b: {neon_type}", "c: {neon_type}"] return_type: "{neon_type}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vmla.f32"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [fmul]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vmla.f32"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fmul]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - float32x2_t @@ -1882,11 +1863,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: 
{neon_type[1]}", "c: {neon_type[1]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vmlal.{type[2]}"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [smlal]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vmlal.{type[2]}"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [smlal]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int16x8_t, int8x8_t, "s8"] @@ -1900,11 +1881,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {type[2]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vmlal.{type[4]}"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [smlal]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vmlal.{type[4]}"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [smlal]]}]] + - 
*neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int32x4_t, int16x4_t, "i16", int32x4_t, 's16'] @@ -1921,11 +1902,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {type[2]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vmlal.{type[2]}"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [umlal]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vmlal.{type[2]}"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [umlal]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [uint32x4_t, uint16x4_t, "u16", uint32x4_t] @@ -1942,12 +1923,12 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[2]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vmlal.{neon_type[1]}"', 'LANE = 1']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [smlal, 'LANE = 1']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vmlal.{neon_type[1]}"', 'LANE = 1']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [smlal, 'LANE = 1']]}]] - FnCall: 
[rustc_legacy_const_generics, ['3']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const LANE: i32'] safety: safe types: @@ -1968,12 +1949,12 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[2]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vmlal.{neon_type[1]}"', 'LANE = 1']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [umlal, 'LANE = 1']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vmlal.{neon_type[1]}"', 'LANE = 1']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [umlal, 'LANE = 1']]}]] - FnCall: [rustc_legacy_const_generics, ['3']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const LANE: i32'] safety: safe types: @@ -1994,11 +1975,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[1]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vmlal.{neon_type[1]}"']]}]] - - FnCall: 
[cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [umlal]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vmlal.{neon_type[1]}"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [umlal]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [uint16x8_t, uint8x8_t] @@ -2015,11 +1996,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}", "c: {neon_type[0]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vmls{type[1]}"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [mls]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vmls{type[1]}"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [mls]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int8x8_t, '.i8'] @@ -2045,11 +2026,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[1]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: 
[target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vmlsl.{neon_type[1]}"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [smlsl]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vmlsl.{neon_type[1]}"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [smlsl]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int16x8_t, int8x8_t] @@ -2063,11 +2044,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {type[2]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vmlsl.{neon_type[1]}"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [smlsl]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vmlsl.{neon_type[1]}"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [smlsl]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int32x4_t, int16x4_t, "i16"] @@ -2080,11 
+2061,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {type[2]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vmlsl.{neon_type[1]}"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [umlsl]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vmlsl.{neon_type[1]}"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [umlsl]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [uint32x4_t, uint16x4_t, "u16"] @@ -2097,12 +2078,12 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[2]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vmlsl.{neon_type[1]}"', 'LANE = 1']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [smlsl, 'LANE = 1']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vmlsl.{neon_type[1]}"', 'LANE = 1']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [smlsl, 'LANE = 1']]}]] - FnCall: [rustc_legacy_const_generics, ['3']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = 
"neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const LANE: i32'] safety: safe types: @@ -2121,12 +2102,12 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[2]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vmlsl.{neon_type[1]}"', 'LANE = 1']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [smlsl, 'LANE = 1']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vmlsl.{neon_type[1]}"', 'LANE = 1']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [smlsl, 'LANE = 1']]}]] - FnCall: [rustc_legacy_const_generics, ['3']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const LANE: i32'] safety: safe types: @@ -2145,12 +2126,12 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[2]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vmlsl.{neon_type[1]}"', 'LANE = 1']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: 
[assert_instr, [umlsl, 'LANE = 1']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vmlsl.{neon_type[1]}"', 'LANE = 1']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [umlsl, 'LANE = 1']]}]] - FnCall: [rustc_legacy_const_generics, ['3']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const LANE: i32'] safety: safe types: @@ -2171,11 +2152,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[1]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vmlsl.{neon_type[1]}"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [umlsl]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vmlsl.{neon_type[1]}"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [umlsl]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [uint16x8_t, uint8x8_t] @@ -2192,8 +2173,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vneg.{type[1]}"']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [neg]]}]] - - 
*neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int8x8_t, 's8'] @@ -2213,8 +2194,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vneg.{type[1]}"']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fneg]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [float32x2_t, 'f32'] @@ -2247,8 +2228,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vqneg.{type[1]}"']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sqneg]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int8x8_t, 's8', 'i8'] @@ -2274,8 +2255,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vqsub.{type[1]}"']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [uqsub]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [uint8x8_t, u8, i8] @@ -2303,8 +2284,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vqsub.{type[1]}"']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sqsub]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int8x8_t, s8, i8] @@ -2329,12 +2310,7 @@ intrinsics: arguments: ["a: {neon_type}", "b: {neon_type}"] return_type: "{neon_type}" attr: - - FnCall: - - cfg_attr - - - 'target_arch = "arm"' - - FnCall: - - target_feature - - - 'enable = "v7"' + - *neon-v7 - FnCall: - cfg_attr - - FnCall: @@ -2356,22 +2332,8 @@ intrinsics: - FnCall: - assert_instr - - uhadd - - FnCall: - - cfg_attr - - - FnCall: - - not - - - 'target_arch 
= "arm"' - - FnCall: - - stable - - - 'feature = "neon_intrinsics"' - - 'since = "1.59.0"' - - FnCall: - - cfg_attr - - - 'target_arch = "arm"' - - FnCall: - - unstable - - - 'feature = "stdarch_arm_neon_intrinsics"' - - 'issue = "111800"' + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - uint8x8_t @@ -2394,12 +2356,7 @@ intrinsics: arguments: ["a: {neon_type}", "b: {neon_type}"] return_type: "{neon_type}" attr: - - FnCall: - - cfg_attr - - - 'target_arch = "arm"' - - FnCall: - - target_feature - - - 'enable = "v7"' + - *neon-v7 - FnCall: - cfg_attr - - FnCall: @@ -2421,22 +2378,8 @@ intrinsics: - FnCall: - assert_instr - - shadd - - FnCall: - - cfg_attr - - - FnCall: - - not - - - 'target_arch = "arm"' - - FnCall: - - stable - - - 'feature = "neon_intrinsics"' - - 'since = "1.59.0"' - - FnCall: - - cfg_attr - - - 'target_arch = "arm"' - - FnCall: - - unstable - - - 'feature = "stdarch_arm_neon_intrinsics"' - - 'issue = "111800"' + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - int8x8_t @@ -2462,8 +2405,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vrhadd.{neon_type}"']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [srhadd]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - int8x8_t @@ -2489,8 +2432,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vrhadd.{neon_type}"']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [urhadd]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - uint8x8_t @@ -2516,8 +2459,8 @@ intrinsics: - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [target_feature, ['enable = "fp-armv8,v8"']]}]] - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vrintn]]}]] - FnCall: [cfg_attr, 
[*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [frintn]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - float32x2_t @@ -2562,8 +2505,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vqadd.{neon_type}"']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [uqadd]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - uint8x8_t @@ -2591,8 +2534,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vqadd.{neon_type}"']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sqadd]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - int8x8_t @@ -2620,8 +2563,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vld1]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ld1]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: unsafe: [neon] types: @@ -2648,8 +2591,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vld1]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ld1]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: unsafe: [neon] types: @@ -2694,8 +2637,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vld1]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ld1]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: unsafe: [neon] types: @@ -2753,8 +2696,8 @@ intrinsics: - *neon-v8 - FnCall: [cfg_attr, 
[*test-is-arm, {FnCall: [assert_instr, [nop]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ld1]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: unsafe: [neon] types: @@ -2781,8 +2724,8 @@ intrinsics: - *neon-v8 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vld1]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ld1]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: unsafe: [neon] types: @@ -2862,7 +2805,7 @@ intrinsics: - ["*const f16", float16x4_t, '_dup', 'f16x4', "[0, 0, 0, 0]"] - ["*const f16", float16x8_t, 'q_dup', 'f16x8', "[0, 0, 0, 0, 0, 0, 0, 0]"] compose: - - Let: [x, "{neon_type[1]}", "vld1{neon_type[1].lane_nox}::<0>(ptr, transmute({type[3]}::splat(0.)))"] + - Let: [x, "{neon_type[1]}", "vld1{neon_type[1].lane_nox}::<0>(ptr, transmute({type[3]}::splat(0.0)))"] - FnCall: [simd_shuffle!, [x, x, "{type[4]}"]] @@ -2873,7 +2816,7 @@ intrinsics: attr: - *enable-v7 - *target-is-arm - - *neon-unstable + - *neon-arm-unstable assert_instr: [vld2] safety: unsafe: [neon] @@ -2907,7 +2850,7 @@ intrinsics: attr: - *enable-v7 - *target-is-arm - - *neon-unstable + - *neon-arm-unstable assert_instr: [nop] safety: unsafe: [neon] @@ -2990,8 +2933,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vld2]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ld2]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: unsafe: [neon] types: @@ -3020,8 +2963,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [nop]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable 
safety: unsafe: [neon] types: @@ -3042,8 +2985,8 @@ intrinsics: - *neon-v8 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [nop]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: unsafe: [neon] types: @@ -3072,7 +3015,7 @@ intrinsics: - FnCall: - rustc_legacy_const_generics - - "2" - - *neon-unstable + - *neon-arm-unstable static_defs: - "const LANE: i32" safety: @@ -3114,8 +3057,8 @@ intrinsics: - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vld2, 'LANE = 0']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ld2, 'LANE = 0']]}]] - FnCall: [rustc_legacy_const_generics, ["2"]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: - "const LANE: i32" safety: @@ -3205,7 +3148,7 @@ intrinsics: - - vld2 - "LANE = 0" - FnCall: [rustc_legacy_const_generics, ["2"]] - - *neon-unstable + - *neon-arm-unstable static_defs: - "const LANE: i32" safety: @@ -3246,7 +3189,7 @@ intrinsics: attr: - *enable-v7 - *target-is-arm - - *neon-unstable + - *neon-arm-unstable assert_instr: [nop] safety: unsafe: [neon] @@ -3297,7 +3240,7 @@ intrinsics: attr: - *enable-v7 - *target-is-arm - - *neon-unstable + - *neon-arm-unstable assert_instr: [vld2] safety: unsafe: [neon] @@ -3332,8 +3275,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vld2]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ld2r]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: unsafe: [neon] types: @@ -3364,8 +3307,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ld2r]]}]] - - *neon-stable-not-arm - - 
*neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: unsafe: [neon] types: @@ -3388,8 +3331,8 @@ intrinsics: - *neon-v8 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ld2r]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: unsafe: [neon] types: @@ -3923,7 +3866,7 @@ intrinsics: attr: - *target-is-arm - *enable-v7 - - *neon-unstable + - *neon-arm-unstable assert_instr: [vld3] safety: unsafe: [neon] @@ -3954,7 +3897,7 @@ intrinsics: attr: - *target-is-arm - *enable-v7 - - *neon-unstable + - *neon-arm-unstable assert_instr: [nop] safety: unsafe: [neon] @@ -4010,7 +3953,7 @@ intrinsics: - *enable-v7 - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [vld3, 'LANE = 0']]}]] - FnCall: [rustc_legacy_const_generics, ["2"]] - - *neon-unstable + - *neon-arm-unstable static_defs: ['const LANE: i32'] safety: unsafe: [neon] @@ -4041,7 +3984,7 @@ intrinsics: - *enable-v7 - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [vld3, 'LANE = 0']]}]] - FnCall: [rustc_legacy_const_generics, ["2"]] - - *neon-unstable + - *neon-arm-unstable static_defs: ['const LANE: i32'] safety: unsafe: [neon] @@ -4076,7 +4019,7 @@ intrinsics: - *enable-v7 - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [vld3, 'LANE = 0']]}]] - FnCall: [rustc_legacy_const_generics, ["2"]] - - *neon-unstable + - *neon-arm-unstable static_defs: ['const LANE: i32'] safety: unsafe: [neon] @@ -4107,8 +4050,8 @@ intrinsics: - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vld3, 'LANE = 0']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ld3, 'LANE = 0']]}]] - FnCall: [rustc_legacy_const_generics, ["2"]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const LANE: i32'] safety: unsafe: [neon] @@ -4138,8 +4081,8 @@ 
intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vld3]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ld3]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: unsafe: [neon] types: @@ -4168,8 +4111,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [nop]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: unsafe: [neon] types: @@ -4190,8 +4133,8 @@ intrinsics: - *neon-v8 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [nop]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: unsafe: [neon] types: @@ -4237,7 +4180,7 @@ intrinsics: doc: Load single 3-element structure and replicate to all lanes of three registers arguments: ["a: {type[0]}"] return_type: "{neon_type[1]}" - attr: [*enable-v7, *target-is-arm, *neon-unstable] + attr: [*enable-v7, *target-is-arm, *neon-arm-unstable] assert_instr: [vld3] safety: unsafe: [neon] @@ -4269,8 +4212,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vld3]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ld3r]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: unsafe: [neon] types: @@ -4297,7 +4240,7 @@ intrinsics: doc: Load single 3-element structure and replicate to all lanes of three registers arguments: ["a: {type[0]}"] return_type: "{neon_type[1]}" - attr: [*target-is-arm, *enable-v7, *neon-unstable] + attr: [*target-is-arm, *enable-v7, *neon-arm-unstable] assert_instr: [nop] safety: unsafe: [neon] @@ -4322,8 +4265,8 @@ 
intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ld3r]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: unsafe: [neon] types: @@ -4346,8 +4289,8 @@ intrinsics: - *neon-v8 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ld3r]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: unsafe: [neon] types: @@ -4454,7 +4397,7 @@ intrinsics: attr: - *enable-v7 - *target-is-arm - - *neon-unstable + - *neon-arm-unstable assert_instr: [vld4] safety: unsafe: [neon] @@ -4485,7 +4428,7 @@ intrinsics: attr: - *enable-v7 - *target-is-arm - - *neon-unstable + - *neon-arm-unstable assert_instr: [nop] safety: unsafe: [neon] @@ -4510,8 +4453,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vld4]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ld4]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: unsafe: [neon] types: @@ -4542,8 +4485,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [nop]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: unsafe: [neon] types: @@ -4568,8 +4511,8 @@ intrinsics: - - 'enable = "neon,aes"' - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [nop]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: unsafe: [neon] types: @@ -4592,7 +4535,7 @@ 
intrinsics: - *target-is-arm - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [vld4, 'LANE = 0']]}]] - FnCall: [rustc_legacy_const_generics, ["2"]] - - *neon-unstable + - *neon-arm-unstable static_defs: ["const LANE: i32"] safety: unsafe: [neon] @@ -4630,8 +4573,8 @@ intrinsics: - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vld4, 'LANE = 0']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ld4, 'LANE = 0']]}]] - FnCall: [rustc_legacy_const_generics, ["2"]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ["const LANE: i32"] safety: unsafe: [neon] @@ -4664,8 +4607,8 @@ intrinsics: - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop, 'LANE = 0']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [nop, 'LANE = 0']]}]] - FnCall: [rustc_legacy_const_generics, ["2"]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable types: - ['*mut i64', int64x1_t] - ['*mut u64', uint64x1_t] @@ -4690,8 +4633,8 @@ intrinsics: - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop, 'LANE = 0']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [nop, 'LANE = 0']]}]] - FnCall: [rustc_legacy_const_generics, ["2"]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable types: - ['*mut p64', poly64x1_t] compose: @@ -4715,8 +4658,8 @@ intrinsics: - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop, 'LANE = 0']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [nop, 'LANE = 0']]}]] - FnCall: [rustc_legacy_const_generics, ["2"]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable types: - ['*mut p64', poly64x2_t] compose: @@ -4737,8 +4680,8 @@ intrinsics: - FnCall: [cfg_attr, [*test-is-arm, {FnCall: 
[assert_instr, [nop, 'LANE = 0']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [nop, 'LANE = 0']]}]] - FnCall: [rustc_legacy_const_generics, ["2"]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable types: - ['*mut i8', int8x8_t, '3'] - ['*mut i16', int16x4_t, '2'] @@ -4893,7 +4836,7 @@ intrinsics: attr: - *enable-v7 - *target-is-arm - - *neon-unstable + - *neon-arm-unstable assert_instr: [vst1] types: - [i8, int8x8x2_t, int8x8_t] @@ -4924,7 +4867,7 @@ intrinsics: attr: - *enable-v7 - *target-is-arm - - *neon-unstable + - *neon-arm-unstable assert_instr: [vst1] types: - [i8, int8x8x3_t, int8x8_t] @@ -4956,7 +4899,7 @@ intrinsics: attr: - *target-is-arm - *enable-v7 - - *neon-unstable + - *neon-arm-unstable assert_instr: [vst1] types: - [i8, int8x8x4_t, int8x8_t] @@ -4989,7 +4932,7 @@ intrinsics: attr: - *target-is-arm - *enable-v7 - - *neon-unstable + - *neon-arm-unstable assert_instr: [vst1] types: - [f32, float32x2x4_t, float32x2_t] @@ -5044,8 +4987,8 @@ intrinsics: - *neon-aes - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [nop]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: unsafe: [neon] types: @@ -5062,7 +5005,7 @@ intrinsics: attr: - *target-is-arm - *enable-v7 - - *neon-unstable + - *neon-arm-unstable assert_instr: [nop] safety: unsafe: [neon] @@ -5088,8 +5031,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [nop]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: unsafe: [neon] types: @@ -5187,8 +5130,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vst2]]}]] - FnCall: [cfg_attr, 
[*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [st2]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: unsafe: [neon] types: @@ -5280,8 +5223,8 @@ intrinsics: - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vst2, 'LANE = 0']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [st2, 'LANE = 0']]}]] - FnCall: [rustc_legacy_const_generics, ['2']] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const LANE: i32'] safety: unsafe: [neon] @@ -5307,7 +5250,7 @@ intrinsics: attr: - *target-is-arm - *enable-v7 - - *neon-unstable + - *neon-arm-unstable assert_instr: [vst2] safety: unsafe: [neon] @@ -5370,7 +5313,7 @@ intrinsics: - *enable-v7 - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [vst2, 'LANE = 0']]}]] - FnCall: [rustc_legacy_const_generics, ['2']] - - *neon-unstable + - *neon-arm-unstable static_defs: ['const LANE: i32'] safety: unsafe: [neon] @@ -5462,8 +5405,8 @@ intrinsics: - *neon-aes - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [nop]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: unsafe: [neon] types: @@ -5480,7 +5423,7 @@ intrinsics: attr: - *target-is-arm - *enable-v7 - - *neon-unstable + - *neon-arm-unstable assert_instr: [nop] safety: unsafe: [neon] @@ -5507,8 +5450,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [nop]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: unsafe: [neon] types: @@ -5527,8 +5470,8 @@ intrinsics: - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vst3, 'LANE = 0']]}]] - FnCall: 
[cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [st3, 'LANE = 0']]}]] - FnCall: [rustc_legacy_const_generics, ['2']] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ["const LANE: i32"] safety: unsafe: [neon] @@ -5555,8 +5498,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vst3]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [st3]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: unsafe: [neon] types: @@ -5582,7 +5525,7 @@ intrinsics: attr: - *target-is-arm - *enable-v7 - - *neon-unstable + - *neon-arm-unstable assert_instr: [vst3] safety: unsafe: [neon] @@ -5647,7 +5590,7 @@ intrinsics: - *enable-v7 - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [vst3, 'LANE = 0']]}]] - FnCall: [rustc_legacy_const_generics, ['2']] - - *neon-unstable + - *neon-arm-unstable static_defs: ['const LANE: i32'] safety: unsafe: [neon] @@ -5840,8 +5783,8 @@ intrinsics: - *neon-aes - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [nop]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: unsafe: [neon] types: @@ -5858,7 +5801,7 @@ intrinsics: attr: - *target-is-arm - *enable-v7 - - *neon-unstable + - *neon-arm-unstable assert_instr: [nop] safety: unsafe: [neon] @@ -5911,8 +5854,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [nop]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: unsafe: [neon] types: @@ -5931,8 +5874,8 @@ intrinsics: - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vst4, 'LANE = 0']]}]] - 
FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [st4, 'LANE = 0']]}]] - FnCall: [rustc_legacy_const_generics, ['2']] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ["const LANE: i32"] safety: unsafe: [neon] @@ -5959,8 +5902,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vst4]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [st4]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: unsafe: [neon] types: @@ -5986,7 +5929,7 @@ intrinsics: attr: - *target-is-arm - *enable-v7 - - *neon-unstable + - *neon-arm-unstable assert_instr: [vst4] safety: unsafe: [neon] @@ -6053,7 +5996,7 @@ intrinsics: - *enable-v7 - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [vst4, 'LANE = 0']]}]] - FnCall: [rustc_legacy_const_generics, ['2']] - - *neon-unstable + - *neon-arm-unstable static_defs: ['const LANE: i32'] safety: unsafe: [neon] @@ -6253,7 +6196,7 @@ intrinsics: - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vusdot]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [usdot]]}]] - *neon-unstable-i8mm - - *neon-unstable-is-arm + - *neon-cfg-arm-unstable safety: safe types: - [int32x2_t, uint8x8_t, int8x8_t] @@ -6278,7 +6221,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [usdot, 'LANE = 0']]}]] - FnCall: [rustc_legacy_const_generics, ['3']] - *neon-unstable-i8mm - - *neon-unstable-is-arm + - *neon-cfg-arm-unstable static_defs: ["const LANE: i32"] safety: safe types: @@ -6307,7 +6250,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sudot, 'LANE = 0']]}]] - FnCall: [rustc_legacy_const_generics, ['3']] - *neon-unstable-i8mm - - *neon-unstable-is-arm + - *neon-cfg-arm-unstable static_defs: ["const LANE: i32"] safety: safe 
types: @@ -6333,8 +6276,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vmul{type[0]}"']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [mul]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - ['.i8', int8x8_t] @@ -6360,8 +6303,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vmul.{type[0]}"']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fmul]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [f32, float32x2_t] @@ -6397,8 +6340,8 @@ intrinsics: - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vmul, 'LANE = 1']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [mul, 'LANE = 1']]}]] - FnCall: [rustc_legacy_const_generics, ['2']] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ["const LANE: i32"] safety: safe types: @@ -6451,8 +6394,8 @@ intrinsics: - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vmul, 'LANE = 1']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [mul, 'LANE = 1']]}]] - FnCall: [rustc_legacy_const_generics, ['2']] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ["const LANE: i32"] safety: safe types: @@ -6479,8 +6422,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vmull.{type[0]}"']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [smull]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - ["s8", int8x8_t, int16x8_t] @@ -6503,8 +6446,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, 
[*test-is-arm, {FnCall: [assert_instr, ['"vmull.{type[0]}"']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [umull]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - ["u8", uint8x8_t, uint16x8_t] @@ -6527,8 +6470,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vmull.{type[0]}"']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [pmull]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - ["p8", poly8x8_t, poly16x8_t] @@ -6549,8 +6492,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ["vmull"]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [smull]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int16x4_t, "i16", int32x4_t] @@ -6571,8 +6514,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ["vmull"]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [umull]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [uint16x4_t, "u16", uint32x4_t] @@ -6612,22 +6555,8 @@ intrinsics: - FnCall: - assert_instr - - fmla - - FnCall: - - cfg_attr - - - FnCall: - - not - - - 'target_arch = "arm"' - - FnCall: - - stable - - - 'feature = "neon_intrinsics"' - - 'since = "1.59.0"' - - FnCall: - - cfg_attr - - - 'target_arch = "arm"' - - FnCall: - - unstable - - - 'feature = "stdarch_arm_neon_intrinsics"' - - 'issue = "111800"' + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - float32x2_t @@ -6649,8 +6578,8 @@ intrinsics: return_type: "{neon_type}" attr: - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: 
[target_feature, ['enable = "vfp4"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vfma]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [fmla]]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vfma]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fmla]]}]] - *neon-fp16 - *neon-unstable-f16 safety: safe @@ -6695,22 +6624,8 @@ intrinsics: - FnCall: - assert_instr - - fmla - - FnCall: - - cfg_attr - - - FnCall: - - not - - - 'target_arch = "arm"' - - FnCall: - - stable - - - 'feature = "neon_intrinsics"' - - 'since = "1.59.0"' - - FnCall: - - cfg_attr - - - 'target_arch = "arm"' - - FnCall: - - unstable - - - 'feature = "stdarch_arm_neon_intrinsics"' - - 'issue = "111800"' + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [float32x2_t, f32] @@ -6732,8 +6647,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vsub{type[0]}"']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sub]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - ['.i8', int8x8_t] @@ -6763,8 +6678,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vsub.{type[0]}"']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fsub]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - ['f32', float32x2_t] @@ -6832,12 +6747,7 @@ intrinsics: arguments: ["a: {neon_type}", "b: {neon_type}"] return_type: "{neon_type}" attr: - - FnCall: - - cfg_attr - - - 'target_arch = "arm"' - - FnCall: - - target_feature - - - 'enable = "v7"' + - *neon-v7 - FnCall: - cfg_attr - - FnCall: @@ -6859,22 +6769,8 @@ 
intrinsics: - FnCall: - assert_instr - - nop - - FnCall: - - cfg_attr - - - FnCall: - - not - - - 'target_arch = "arm"' - - FnCall: - - stable - - - 'feature = "neon_intrinsics"' - - 'since = "1.59.0"' - - FnCall: - - cfg_attr - - - 'target_arch = "arm"' - - FnCall: - - unstable - - - 'feature = "stdarch_arm_neon_intrinsics"' - - 'issue = "111800"' + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - poly8x8_t @@ -6894,12 +6790,7 @@ intrinsics: arguments: ["a: {type}", "b: {type}"] return_type: "{type}" attr: - - FnCall: - - cfg_attr - - - 'target_arch = "arm"' - - FnCall: - - target_feature - - - 'enable = "v7"' + - *neon-v7 - FnCall: - cfg_attr - - FnCall: @@ -6921,22 +6812,8 @@ intrinsics: - FnCall: - assert_instr - - nop - - FnCall: - - cfg_attr - - - FnCall: - - not - - - 'target_arch = "arm"' - - FnCall: - - stable - - - 'feature = "neon_intrinsics"' - - 'since = "1.59.0"' - - FnCall: - - cfg_attr - - - 'target_arch = "arm"' - - FnCall: - - unstable - - - 'feature = "stdarch_arm_neon_intrinsics"' - - 'issue = "111800"' + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - p128 @@ -6953,8 +6830,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ["vsubhn"]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [subhn]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int16x8_t, int8x8_t, 'i16x8', 'i16x8::new(8, 8, 8, 8, 8, 8, 8, 8)'] @@ -6980,8 +6857,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ["vsubhn"]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [subhn2]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int8x8_t, int16x8_t, int8x16_t, '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]'] @@ -7005,8 +6882,8 @@ 
intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vhsub.{type[0]}"']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [uhsub]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - ['u8', uint8x8_t] @@ -7032,8 +6909,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vhsub.{type[0]}"']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [shsub]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - ['s8', int8x8_t] @@ -7059,8 +6936,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vsubw]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ssubw]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int16x8_t, int8x8_t] @@ -7080,8 +6957,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vsubw]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [usubw]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [uint16x8_t, uint8x8_t] @@ -7101,8 +6978,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vsubl]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ssubl]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int8x8_t, int16x8_t] @@ -7127,8 +7004,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vsubl]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [usubl]]}]] - - *neon-stable-not-arm - - 
*neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [uint8x8_t, uint16x8_t] @@ -7155,7 +7032,7 @@ intrinsics: - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vsdot]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sdot]]}]] - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [unstable, ['feature = "stdarch_neon_dotprod"', 'issue = "117224"']]}]] - - *neon-unstable-is-arm + - *neon-cfg-arm-unstable safety: safe types: - [int32x2_t, int8x8_t] @@ -7179,7 +7056,7 @@ intrinsics: - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vudot]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [udot]]}]] - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [unstable, ['feature = "stdarch_neon_dotprod"', 'issue = "117224"']]}]] - - *neon-unstable-is-arm + - *neon-cfg-arm-unstable safety: safe types: - [uint32x2_t, uint8x8_t] @@ -7205,7 +7082,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sdot, 'LANE = 0']]}]] - FnCall: [rustc_legacy_const_generics, ['3']] - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [unstable, ['feature = "stdarch_neon_dotprod"', 'issue = "117224"']]}]] - - *neon-unstable-is-arm + - *neon-cfg-arm-unstable safety: safe types: - [int32x2_t, int8x8_t, int8x8_t, int32x2_t, '[LANE as u32, LANE as u32]'] @@ -7238,7 +7115,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [udot, 'LANE = 0']]}]] - FnCall: [rustc_legacy_const_generics, ['3']] - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [unstable, ['feature = "stdarch_neon_dotprod"', 'issue = "117224"']]}]] - - *neon-unstable-is-arm + - *neon-cfg-arm-unstable safety: safe types: - [uint32x2_t, uint8x8_t, uint8x8_t, uint32x2_t, '[LANE as u32, LANE as u32]'] @@ -7267,8 +7144,8 @@ intrinsics: - *neon-v7 - FnCall: 
[cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vmax]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [smax]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - int8x8_t @@ -7294,8 +7171,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vmax]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [umax]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - uint8x8_t @@ -7321,8 +7198,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vmax]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fmax]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - float32x2_t @@ -7369,8 +7246,8 @@ intrinsics: - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [target_feature, ['enable = "fp-armv8,v8"']]}]] - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vmaxnm]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fmaxnm]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - float32x2_t @@ -7441,8 +7318,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vmin]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [smin]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - int8x8_t @@ -7468,8 +7345,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vmin]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [umin]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - 
*neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - uint8x8_t @@ -7495,8 +7372,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vmin]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fmin]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - float32x2_t @@ -7544,8 +7421,8 @@ intrinsics: - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [target_feature, ['enable = "fp-armv8,v8"']]}]] - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vminnm]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fminnm]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - float32x2_t @@ -7567,8 +7444,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vpadd]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [faddp]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - float32x2_t @@ -7609,11 +7486,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vqdmull]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [sqdmull]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, 
[*test-is-arm, {FnCall: [assert_instr, [vqdmull]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sqdmull]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int16x4_t, int32x4_t] @@ -7632,11 +7509,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {type[1]}"] return_type: "{neon_type[2]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vqdmull]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [sqdmull]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vqdmull]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sqdmull]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int16x4_t, "i16", int32x4_t] @@ -7649,12 +7526,12 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[2]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vqdmull, 'N = 2']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [sqdmull, 'N = 2']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vqdmull, 'N = 2']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sqdmull, 'N = 2']]}]] - 
FnCall: [rustc_legacy_const_generics, ['2']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -7669,12 +7546,12 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[2]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vqdmull, 'N = 1']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [sqdmull, 'N = 1']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vqdmull, 'N = 1']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sqdmull, 'N = 1']]}]] - FnCall: [rustc_legacy_const_generics, ['2']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -7689,11 +7566,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[1]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vqdmlal]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 
'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [sqdmlal]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vqdmlal]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sqdmlal]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int32x4_t, int16x4_t, int16x4_t, int32x4_t] @@ -7706,11 +7583,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {type[2]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vqdmlal]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [sqdmlal]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vqdmlal]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sqdmlal]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int32x4_t, int16x4_t, "i16", int32x4_t] @@ -7723,12 +7600,12 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[1]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: 
[all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vqdmlal, N = 2]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [sqdmlal, 'N = 2']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vqdmlal, N = 2]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sqdmlal, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['3']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -7742,12 +7619,12 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[1]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vqdmlal, N = 1]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [sqdmlal, 'N = 1']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vqdmlal, N = 1]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sqdmlal, 'N = 1']]}]] - FnCall: [rustc_legacy_const_generics, ['3']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: 
i32'] safety: safe types: @@ -7761,11 +7638,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[1]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vqdmlsl]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [sqdmlsl]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vqdmlsl]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sqdmlsl]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int32x4_t, int16x4_t, int16x4_t, int32x4_t] @@ -7778,11 +7655,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {type[2]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vqdmlsl]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [sqdmlsl]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vqdmlsl]]}]] + - FnCall: 
[cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sqdmlsl]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int32x4_t, int16x4_t, "i16", int32x4_t, '_n_s16'] @@ -7795,12 +7672,12 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[1]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vqdmlsl, N = 2]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [sqdmlsl, 'N = 2']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vqdmlsl, N = 2]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sqdmlsl, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['3']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -7814,12 +7691,12 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[1]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vqdmlsl, N = 1]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [sqdmlsl, 'N = 1']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vqdmlsl, N = 1]]}]] + - FnCall: [cfg_attr, 
[*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sqdmlsl, 'N = 1']]}]] - FnCall: [rustc_legacy_const_generics, ['3']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -7833,11 +7710,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vqdmulh]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [sqdmulh]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vqdmulh]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sqdmulh]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int16x4_t, int16x4_t, int16x4_t] @@ -7858,11 +7735,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {type[1]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vqdmulh]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = 
"arm64ec"']]}]]}, {FnCall: [assert_instr, [sqdmulh]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vqdmulh]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sqdmulh]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int16x4_t, "i16", int16x4_t, '_n_s16'] @@ -7878,11 +7755,11 @@ intrinsics: arguments: ["a: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vqmovn]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [sqxtn]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vqmovn]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sqxtn]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int16x8_t, int8x8_t] @@ -7902,11 +7779,11 @@ intrinsics: arguments: ["a: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vqmovun]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 
{FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [sqxtun]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vqmovun]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sqxtun]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int16x8_t, uint8x8_t] @@ -7926,11 +7803,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vqrdmulh]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [sqrdmulh]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vqrdmulh]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sqrdmulh]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int16x4_t, int16x4_t, int16x4_t] @@ -7951,11 +7828,11 @@ intrinsics: arguments: ["a: {neon_type}", "b: {neon_type}"] return_type: "{neon_type}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = 
"arm"']]}, {FnCall: [assert_instr, [vqrshl]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [sqrshl]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vqrshl]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sqrshl]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - int8x8_t @@ -7980,11 +7857,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vqrshl]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [uqrshl]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vqrshl]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [uqrshl]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [uint8x8_t, int8x8_t] @@ -8013,7 +7890,7 @@ intrinsics: - *enable-v7 - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [vqrshrn, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [unstable, ['feature = 
"stdarch_arm_neon_intrinsics"', 'issue = "111800"']] + - *neon-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -8040,7 +7917,7 @@ intrinsics: - FnCall: [cfg, [{FnCall: [not, ['target_arch = "arm"']]}]] - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [sqrshrn, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + - *neon-stable static_defs: ['const N: i32'] safety: safe types: @@ -8064,11 +7941,11 @@ intrinsics: arguments: ["a: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg, ['target_arch = "arm"']] - - FnCall: [target_feature, ['enable = "neon,v7"']] + - *target-is-arm + - *enable-v7 - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [vqrshrun, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']] + - *neon-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -8100,7 +7977,7 @@ intrinsics: - FnCall: [cfg, [{FnCall: [not, ['target_arch = "arm"']]}]] - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [sqrshrun, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + - *neon-stable static_defs: ['const N: i32'] safety: safe types: @@ -8124,11 +8001,11 @@ intrinsics: arguments: ["a: {neon_type}", "b: {neon_type}"] return_type: "{neon_type}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vqshl]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [sqshl]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, 
['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vqshl]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sqshl]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - int8x8_t @@ -8153,12 +8030,12 @@ intrinsics: arguments: ["a: {neon_type[0]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vqshl, 'N = 2']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [sqshl, 'N = 2']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vqshl, 'N = 2']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sqshl, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -8182,11 +8059,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vqshl]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [uqshl]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, 
['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vqshl]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [uqshl]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [uint8x8_t, int8x8_t] @@ -8211,12 +8088,12 @@ intrinsics: arguments: ["a: {neon_type[0]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vqshl, N = 2]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [uqshl, 'N = 2']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vqshl, N = 2]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [uqshl, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -8240,11 +8117,11 @@ intrinsics: arguments: ["a: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg, ['target_arch = "arm"']] - - FnCall: [target_feature, ['enable = "neon,v7"']] + - *target-is-arm + - *enable-v7 - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [vqshrn, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [unstable, ['feature = 
"stdarch_arm_neon_intrinsics"', 'issue = "111800"']] + - *neon-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -8271,7 +8148,7 @@ intrinsics: - FnCall: [cfg, [{FnCall: [not, ['target_arch = "arm"']]}]] - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [sqshrn, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + - *neon-stable static_defs: ['const N: i32'] safety: safe types: @@ -8295,11 +8172,11 @@ intrinsics: arguments: ["a: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg, ['target_arch = "arm"']] - - FnCall: [target_feature, ['enable = "neon,v7"']] + - *target-is-arm + - *enable-v7 - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [vqshrn, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']] + - *neon-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -8326,7 +8203,7 @@ intrinsics: - FnCall: [cfg, [{FnCall: [not, ['target_arch = "arm"']]}]] - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [uqshrn, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + - *neon-stable static_defs: ['const N: i32'] safety: safe types: @@ -8350,11 +8227,11 @@ intrinsics: arguments: ["a: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg, ['target_arch = "arm"']] - - FnCall: [target_feature, ['enable = "neon,v7"']] + - *target-is-arm + - *enable-v7 - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [vqshrun, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']] + - *neon-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -8381,7 +8258,7 @@ intrinsics: - FnCall: [cfg, [{FnCall: [not, ['target_arch = "arm"']]}]] - FnCall: [cfg_attr, [test, 
{FnCall: [assert_instr, [sqshrun, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + - *neon-stable static_defs: ['const N: i32'] safety: safe types: @@ -8405,11 +8282,11 @@ intrinsics: arguments: ["a: {neon_type}", "b: {neon_type}"] return_type: "{neon_type}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vrsqrts]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [frsqrts]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vrsqrts]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [frsqrts]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - float32x2_t @@ -8431,8 +8308,8 @@ intrinsics: attr: - *neon-v8 - *neon-fp16 - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vrsqrts]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [frsqrts]]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vrsqrts]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [frsqrts]]}]] - *neon-unstable-f16 safety: safe types: @@ -8453,11 +8330,11 @@ intrinsics: arguments: ["a: {neon_type}"] return_type: "{neon_type}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - 
FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vrecpe]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [frecpe]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vrecpe]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [frecpe]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - float32x2_t @@ -8478,8 +8355,8 @@ intrinsics: return_type: "{neon_type}" attr: - *neon-v8 - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vrecpe]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [frecpe]]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vrecpe]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [frecpe]]}]] - *neon-fp16 - *neon-unstable-f16 safety: safe @@ -8501,11 +8378,11 @@ intrinsics: arguments: ["a: {neon_type}", "b: {neon_type}"] return_type: "{neon_type}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vrecps]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [frecps]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: 
[cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vrecps]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [frecps]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - float32x2_t @@ -8526,8 +8403,8 @@ intrinsics: return_type: "{neon_type}" attr: - *neon-v8 - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vrecps]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [frecps]]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vrecps]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [frecps]]}]] - *neon-fp16 - *neon-unstable-f16 safety: safe @@ -8553,8 +8430,8 @@ intrinsics: - *neon-v8 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [nop]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [poly64x1_t, int32x2_t] @@ -8622,8 +8499,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [nop]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [uint8x8_t, int8x8_t] @@ -8958,11 +8835,11 @@ intrinsics: arguments: ["a: {neon_type}", "b: {neon_type}"] return_type: "{neon_type}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vrshl]]}]] - - FnCall: [cfg_attr, [{FnCall: 
[all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [srshl]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vrshl]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [srshl]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - int8x8_t @@ -8987,11 +8864,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vrshl]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [urshl]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vrshl]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [urshl]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [uint8x8_t, int8x8_t] @@ -9016,12 +8893,12 @@ intrinsics: arguments: ["a: {neon_type[0]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vrshr, 'N = 
2']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [srshr, 'N = 2']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vrshr, 'N = 2']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [srshr, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -9045,12 +8922,12 @@ intrinsics: arguments: ["a: {neon_type[0]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vrshr, N = 2]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [urshr, 'N = 2']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vrshr, N = 2]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [urshr, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -9074,11 +8951,11 @@ intrinsics: arguments: ["a: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - 
FnCall: [cfg, ['target_arch = "arm"']] - - FnCall: [target_feature, ['enable = "neon,v7"']] + - *target-is-arm + - *enable-v7 - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [vrshrn, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']] + - *neon-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -9105,7 +8982,7 @@ intrinsics: - FnCall: [cfg, [{FnCall: [not, ['target_arch = "arm"']]}]] - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [rshrn, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + - *neon-stable static_defs: ['const N: i32'] safety: safe types: @@ -9129,12 +9006,12 @@ intrinsics: arguments: ["a: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vrshrn, N = 2]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [rshrn, 'N = 2']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vrshrn, N = 2]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [rshrn, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -9154,12 +9031,12 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[0]}" attr: - - FnCall: 
[cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vrsra, 'N = 2']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [srsra, 'N = 2']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vrsra, 'N = 2']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [srsra, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['2']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -9183,11 +9060,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[2]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vrsubhn]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [rsubhn]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vrsubhn]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [rsubhn]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: 
- [int16x8_t, int16x8_t, int8x8_t] @@ -9207,11 +9084,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[2]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vrsubhn]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [rsubhn]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vrsubhn]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [rsubhn]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [uint16x8_t, uint16x8_t, uint8x8_t, s16] @@ -9230,11 +9107,11 @@ intrinsics: arguments: ["a: {type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [nop]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [nop]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [nop]]}]] + - 
*neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - ["u64", int8x8_t] @@ -9257,8 +9134,8 @@ intrinsics: return_type: "{neon_type[1]}" attr: - *neon-v7 - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [nop]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [nop]]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [nop]]}]] - *neon-fp16 - *neon-unstable-f16 safety: safe @@ -9272,12 +9149,12 @@ intrinsics: arguments: ["a: {type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [target_feature, ['enable = "neon,aes"']] - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v8"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [nop]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [nop]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-aes + - *neon-v8 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [nop]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - ["u64", poly64x1_t] @@ -9289,12 +9166,12 @@ intrinsics: arguments: ["a: {type[0]}", "b: {neon_type[1]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: 
[assert_instr, [nop, LANE = 0]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [nop, 'LANE = 0']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop, LANE = 0]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [nop, 'LANE = 0']]}]] - FnCall: [rustc_legacy_const_generics, ['2']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const LANE: i32'] safety: safe types: @@ -9329,8 +9206,8 @@ intrinsics: return_type: "{neon_type[1]}" attr: - *neon-v8 - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [nop, LANE = 0]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [nop, 'LANE = 0']]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop, LANE = 0]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [nop, 'LANE = 0']]}]] - FnCall: [rustc_legacy_const_generics, ['2']] - *neon-fp16 - *neon-unstable-f16 @@ -9349,12 +9226,12 @@ intrinsics: arguments: ["a: {type[0]}", "b: {neon_type[1]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [nop, 'LANE = 0']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [nop, 'LANE = 0']]}]] + - *neon-v7 + - FnCall: [cfg_attr, 
[*test-is-arm, {FnCall: [assert_instr, [nop, 'LANE = 0']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [nop, 'LANE = 0']]}]] - FnCall: [rustc_legacy_const_generics, ['2']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const LANE: i32'] safety: safe types: @@ -9369,13 +9246,13 @@ intrinsics: arguments: ["a: {type[0]}", "b: {neon_type[1]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [target_feature, ['enable = "neon,aes"']] - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v8"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [nop, 'LANE = 0']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [nop, 'LANE = 0']]}]] + - *neon-aes + - *neon-v8 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop, 'LANE = 0']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [nop, 'LANE = 0']]}]] - FnCall: [rustc_legacy_const_generics, ['2']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const LANE: i32'] safety: safe types: @@ -9389,13 +9266,13 @@ intrinsics: arguments: ["a: {type[0]}", "b: {neon_type[1]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [target_feature, ['enable = "neon,aes"']] - - FnCall: [cfg_attr, [target_arch = "arm", 
{FnCall: [target_feature, ['enable = "v8"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [nop, 'LANE = 0']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [nop, 'LANE = 0']]}]] + - *neon-aes + - *neon-v8 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop, 'LANE = 0']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [nop, 'LANE = 0']]}]] - FnCall: [rustc_legacy_const_generics, ['2']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const LANE: i32'] safety: safe types: @@ -9409,11 +9286,11 @@ intrinsics: arguments: ["a: {neon_type}", "b: {neon_type}"] return_type: "{neon_type}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vshl]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [sshl]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vshl]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sshl]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - int8x8_t @@ -9438,11 +9315,11 
@@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vshl]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [ushl]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vshl]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ushl]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [uint8x8_t, int8x8_t] @@ -9467,12 +9344,12 @@ intrinsics: arguments: ["a: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vshll.s8"', 'N = 2']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [sshll, 'N = 2']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vshll.s8"', 'N = 2']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sshll, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 
'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -9489,12 +9366,12 @@ intrinsics: arguments: ["a: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vshll.s16"', 'N = 2']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [sshll, 'N = 2']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vshll.s16"', 'N = 2']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sshll, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -9511,12 +9388,12 @@ intrinsics: arguments: ["a: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vshll.s32"', 'N = 2']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [sshll, 'N = 2']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vshll.s32"', 'N = 2']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sshll, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: 
[cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -9533,12 +9410,12 @@ intrinsics: arguments: ["a: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vshll.u8"', 'N = 2']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [ushll, 'N = 2']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vshll.u8"', 'N = 2']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ushll, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -9555,12 +9432,12 @@ intrinsics: arguments: ["a: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vshll.u16"', 'N = 2']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [ushll, 'N = 2']]}]] + - *neon-v7 + - FnCall: 
[cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vshll.u16"', 'N = 2']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ushll, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -9577,12 +9454,12 @@ intrinsics: arguments: ["a: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vshll.u32"', 'N = 2']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [ushll, 'N = 2']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vshll.u32"', 'N = 2']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ushll, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -9599,12 +9476,12 @@ intrinsics: arguments: ["a: {neon_type[0]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: 
[assert_instr, ['"vshr.{neon_type[0]}"', 'N = 2']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [sshr, 'N = 2']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vshr.{neon_type[0]}"', 'N = 2']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sshr, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -9629,12 +9506,12 @@ intrinsics: arguments: ["a: {neon_type[0]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vshr.{neon_type[0]}"', 'N = 2']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [ushr, 'N = 2']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vshr.{neon_type[0]}"', 'N = 2']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ushr, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ 
-9659,12 +9536,12 @@ intrinsics: arguments: ["a: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vshrn{type[2]}"', 'N = 2']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [shrn, 'N = 2']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vshrn{type[2]}"', 'N = 2']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [shrn, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -9688,12 +9565,12 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vsra, 'N = 2']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [ssra, 'N = 2']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vsra, 'N = 2']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ssra, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['2']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = 
"1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -9717,11 +9594,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vtrn]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [trn]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vtrn]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [trn]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int8x8_t, int8x8x2_t, '[0, 8, 2, 10, 4, 12, 6, 14]', '[1, 9, 3, 11, 5, 13, 7, 15]'] @@ -9759,8 +9636,8 @@ intrinsics: return_type: "{neon_type[1]}" attr: - *neon-v7 - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vtrn]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [trn]]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vtrn]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [trn]]}]] - *neon-fp16 - *neon-unstable-f16 safety: safe @@ -9786,11 +9663,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: 
"{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vtrn]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [zip]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vtrn]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [zip]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int32x2_t, int32x2x2_t, '[0, 2]', '[1, 3]'] @@ -9817,8 +9694,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vorr]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [zip]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int8x16_t, int8x16x2_t, '[0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]', '[8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]'] @@ -9851,8 +9728,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vtrn]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [zip]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int32x2_t, int32x2x2_t, '[0, 2]', '[1, 3]'] @@ -9879,8 +9756,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vzip]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [zip]]}]] 
- - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int8x8_t, int8x8x2_t, '[0, 8, 1, 9, 2, 10, 3, 11]', '[4, 12, 5, 13, 6, 14, 7, 15]'] @@ -9938,8 +9815,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vuzp]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [uzp]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int8x8_t, int8x8x2_t, '[0, 2, 4, 6, 8, 10, 12, 14]', '[1, 3, 5, 7, 9, 11, 13, 15]'] @@ -10007,8 +9884,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vtrn]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [zip]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [float32x2_t, float32x2x2_t, '[0, 2]', '[1, 3]'] @@ -10032,11 +9909,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[1]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vabal.{type[2]}"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [uabal]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vabal.{type[2]}"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [uabal]]}]] + - 
*neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [uint16x8_t, uint8x8_t, "u8"] @@ -10051,11 +9928,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[1]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vabal.{neon_type[1]}"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [sabal]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vabal.{neon_type[1]}"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sabal]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int16x8_t, int8x8_t, uint8x8_t] @@ -10074,8 +9951,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vqabs.{neon_type}"']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sqabs]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - int8x8_t @@ -10097,11 +9974,11 @@ intrinsics: doc: "Store multiple single-element structures to one, two, three, or four registers" arguments: ["a: {type[0]}", "b: {neon_type[1]}"] attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vst1]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, 
['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [st1]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vst1]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [st1]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: unsafe: [neon] types: @@ -10151,12 +10028,12 @@ intrinsics: doc: "Store multiple single-element structures to one, two, three, or four registers" arguments: ["a: {type[0]}", "b: {neon_type[1]}"] attr: - - FnCall: [target_feature, ['enable = "neon,aes"']] - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v8"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vst1]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [st1]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-aes + - *neon-v8 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vst1]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [st1]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: unsafe: [neon] types: @@ -10171,12 +10048,12 @@ intrinsics: doc: "Store multiple single-element structures to one, two, three, or four registers" arguments: ["a: {type[0]}", "b: {neon_type[1]}"] attr: - - FnCall: [target_feature, ['enable = "neon,aes"']] - - 
FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v8"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [nop]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [st1]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-aes + - *neon-v8 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [st1]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: unsafe: [neon] types: @@ -10195,10 +10072,10 @@ intrinsics: doc: "Store multiple single-element structures to one, two, three, or four registers" arguments: ["a: {type[0]}", "b: {neon_type[1]}"] attr: - - FnCall: [cfg, ['target_arch = "arm"']] - - FnCall: [target_feature, ['enable = "neon,v7"']] + - *target-is-arm + - *enable-v7 - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [vst1]]}]] - - FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']] + - *neon-arm-unstable safety: unsafe: [neon] types: @@ -10222,7 +10099,7 @@ intrinsics: doc: "Store multiple single-element structures to one, two, three, or four registers" arguments: ["a: {type[0]}", "b: {neon_type[1]}"] attr: - - FnCall: [cfg, ['target_arch = "arm"']] + - *target-is-arm - *neon-v7 - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [vst1]]}]] - *neon-fp16 @@ -10275,7 +10152,7 @@ intrinsics: doc: "Store multiple single-element structures to one, two, three, or four registers" arguments: ["a: {type[0]}", "b: {neon_type[1]}"] attr: - - FnCall: [cfg, ['target_arch = "arm"']] + - *target-is-arm - *neon-v7 - FnCall: 
[cfg_attr, [test, {FnCall: [assert_instr, [vst1]]}]] - *neon-fp16 @@ -10304,7 +10181,7 @@ intrinsics: attr: - FnCall: [cfg, [{FnCall: [not, ['target_arch = "arm"']]}]] - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [st1]]}]] - - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + - *neon-stable safety: unsafe: [neon] types: @@ -10328,7 +10205,7 @@ intrinsics: attr: - FnCall: [cfg, [{FnCall: [not, ['target_arch = "arm"']]}]] - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [st1]]}]] - - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + - *neon-stable safety: unsafe: [neon] types: @@ -10381,7 +10258,7 @@ intrinsics: attr: - FnCall: [cfg, [{FnCall: [not, ['target_arch = "arm"']]}]] - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [st1]]}]] - - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + - *neon-stable safety: unsafe: [neon] types: @@ -10453,10 +10330,10 @@ intrinsics: return_type: "{neon_type}" attr: - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "vfp4"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vfms]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [fmls]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vfms]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fmls]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - float32x2_t @@ -10470,11 +10347,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[0]}" attr: 
- - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vmul]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [pmul]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vmul]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [pmul]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [poly8x8_t, int8x8_t] @@ -10493,11 +10370,11 @@ intrinsics: arguments: ["a: {neon_type}", "b: {neon_type}", "c: {neon_type}"] return_type: "{neon_type}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vmls.f32"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [fmul]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vmls.f32"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fmul]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - float32x2_t @@ -10510,11 +10387,11 @@ intrinsics: arguments: 
["a: {neon_type}", "b: {neon_type}"] return_type: "{neon_type}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vcge.{neon_type}"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [cmhs]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vcge.{neon_type}"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [cmhs]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - uint8x8_t @@ -10531,11 +10408,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vcge.f32"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [fcmge]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vcge.f32"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fcmge]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable 
safety: safe types: - [float32x2_t, uint32x2_t] @@ -10549,8 +10426,8 @@ intrinsics: return_type: "{neon_type[1]}" attr: - *neon-v8 - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vcge.f16"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [fcmge]]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vcge.f16"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fcmge]]}]] - *neon-fp16 - *neon-unstable-f16 safety: safe @@ -10567,8 +10444,8 @@ intrinsics: return_type: "{neon_type[1]}" attr: - *neon-v8 - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vcge.f16"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [fcmge]]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vcge.f16"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fcmge]]}]] - *neon-fp16 - *neon-unstable-f16 safety: safe @@ -10587,11 +10464,11 @@ intrinsics: arguments: ["a: {neon_type}", "b: {neon_type}"] return_type: "{neon_type}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vcgt.{neon_type}"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [cmhi]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, 
[*test-is-arm, {FnCall: [assert_instr, ['"vcgt.{neon_type}"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [cmhi]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - uint8x8_t @@ -10608,11 +10485,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vtst]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [cmtst]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vtst]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [cmtst]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [uint8x8_t, u8x8, 'u8x8::new(0, 0, 0, 0, 0, 0, 0, 0)'] @@ -10631,12 +10508,12 @@ intrinsics: arguments: ["a: {neon_type[0]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vshl, 'N = 2']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [shl, 'N = 2']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vshl, 'N = 2']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [shl, 'N = 2']]}]] - FnCall: 
[rustc_legacy_const_generics, ['1']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -10668,12 +10545,12 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vsra, 'N = 2']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [usra, 'N = 2']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vsra, 'N = 2']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [usra, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['2']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -10697,12 +10574,12 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vrsra, 'N = 2']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, 
{FnCall: [assert_instr, [ursra, 'N = 2']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vrsra, 'N = 2']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ursra, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['2']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -10730,7 +10607,7 @@ intrinsics: - *enable-v7 - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [vqrshrn, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']] + - *neon-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -10757,7 +10634,7 @@ intrinsics: - *target-not-arm - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [uqrshrn, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + - *neon-stable static_defs: ['const N: i32'] safety: safe types: @@ -10781,11 +10658,11 @@ intrinsics: arguments: ["a: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vcvt]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [fcvtzu]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = 
"stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vcvt]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fcvtzu]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [float32x2_t, uint32x2_t] @@ -10806,8 +10683,8 @@ intrinsics: return_type: "{neon_type[1]}" attr: - *neon-v8 - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vcvt]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [fcvtzu]]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vcvt]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fcvtzu]]}]] - *neon-fp16 - *neon-unstable-f16 safety: safe @@ -10825,8 +10702,8 @@ intrinsics: return_type: "{neon_type[1]}" attr: - *neon-v7 - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vcvt.f16.f32]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [fcvtn]]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vcvt.f16.f32]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fcvtn]]}]] - *neon-fp16 - *neon-unstable-f16 safety: safe @@ -10841,8 +10718,8 @@ intrinsics: return_type: "{neon_type[1]}" attr: - *neon-v7 - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vcvt]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [fcvtl]]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vcvt]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fcvtl]]}]] - *neon-fp16 - 
*neon-unstable-f16 safety: safe @@ -10856,11 +10733,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}", "c: {type[1]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vmla.i16"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [mla]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vmla.i16"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [mla]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int16x4_t, "i16", int16x4_t] @@ -10879,11 +10756,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}", "c: {type[1]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vmla.i32"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [mla]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vmla.i32"']]}]] + - FnCall: 
[cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [mla]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int32x2_t, "i32", int32x2_t] @@ -10902,11 +10779,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}", "c: {type[1]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vmla.f32"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [fmul]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vmla.f32"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fmul]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [float32x2_t, "f32", float32x2_t] @@ -10919,12 +10796,12 @@ intrinsics: arguments: ["a: {neon_type[1]}", "b: {neon_type[1]}", "c: {neon_type[2]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vmla.i16"', 'LANE = 1']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [mla, 'LANE = 1']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vmla.i16"', 'LANE = 1']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [mla, 'LANE = 1']]}]] - 
FnCall: [rustc_legacy_const_generics, ['3']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const LANE: i32'] safety: safe types: @@ -10949,12 +10826,12 @@ intrinsics: arguments: ["a: {neon_type[1]}", "b: {neon_type[1]}", "c: {neon_type[2]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vmla.i32"', 'LANE = 1']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [mla, 'LANE = 1']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vmla.i32"', 'LANE = 1']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [mla, 'LANE = 1']]}]] - FnCall: [rustc_legacy_const_generics, ['3']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const LANE: i32'] safety: safe types: @@ -10979,12 +10856,12 @@ intrinsics: arguments: ["a: {neon_type[1]}", "b: {neon_type[1]}", "c: {neon_type[2]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vmla.f32"', 'LANE = 1']]}]] - - FnCall: [cfg_attr, [{FnCall: 
[all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [fmul, 'LANE = 1']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vmla.f32"', 'LANE = 1']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fmul, 'LANE = 1']]}]] - FnCall: [rustc_legacy_const_generics, ['3']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const LANE: i32'] safety: safe types: @@ -11005,11 +10882,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}", "c: {type[1]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vmls.i16"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [mls]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vmls.i16"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [mls]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int16x4_t, "i16", int16x4_t] @@ -11028,11 +10905,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}", "c: {type[1]}"] return_type: "{neon_type[0]}" attr: - - FnCall: 
[cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vmls.i32"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [mls]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vmls.i32"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [mls]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int32x2_t, "i32", int32x2_t] @@ -11051,11 +10928,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}", "c: {type[1]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vmls.f32"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [fmul]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vmls.f32"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fmul]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [float32x2_t, "f32", float32x2_t] @@ 
-11068,12 +10945,12 @@ intrinsics: arguments: ["a: {neon_type[1]}", "b: {neon_type[1]}", "c: {neon_type[2]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vmls.i16"', 'LANE = 1']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [mls, 'LANE = 1']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vmls.i16"', 'LANE = 1']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [mls, 'LANE = 1']]}]] - FnCall: [rustc_legacy_const_generics, ['3']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const LANE: i32'] safety: safe types: @@ -11098,12 +10975,12 @@ intrinsics: arguments: ["a: {neon_type[1]}", "b: {neon_type[1]}", "c: {neon_type[2]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vmls.i32"', 'LANE = 1']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [mls, 'LANE = 1']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vmls.i32"', 'LANE = 1']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [mls, 'LANE = 1']]}]] - FnCall: [rustc_legacy_const_generics, ['3']] - - FnCall: [cfg_attr, [{FnCall: [not, 
['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const LANE: i32'] safety: safe types: @@ -11128,12 +11005,12 @@ intrinsics: arguments: ["a: {neon_type[1]}", "b: {neon_type[1]}", "c: {neon_type[2]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vmls.f32"', 'LANE = 1']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [fmul, 'LANE = 1']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vmls.f32"', 'LANE = 1']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fmul, 'LANE = 1']]}]] - FnCall: [rustc_legacy_const_generics, ['3']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const LANE: i32'] safety: safe types: @@ -11154,11 +11031,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {type[1]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vmul]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [mul]]}]] - - 
FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vmul]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [mul]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int16x4_t, "i16"] @@ -11180,11 +11057,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {type[1]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vmul]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [fmul]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vmul]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fmul]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [float32x2_t, "f32"] @@ -11202,8 +11079,8 @@ intrinsics: return_type: "{neon_type[0]}" attr: - *neon-v8 - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vmul]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [fmul]]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vmul]]}]] + - FnCall: 
[cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fmul]]}]] - *neon-fp16 - *neon-unstable-f16 safety: safe @@ -11222,12 +11099,12 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vmul, 'LANE = 0']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [fmul, 'LANE = 0']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vmul, 'LANE = 0']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fmul, 'LANE = 0']]}]] - FnCall: [rustc_legacy_const_generics, ['2']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const LANE: i32'] safety: safe types: @@ -11247,12 +11124,12 @@ intrinsics: arguments: ["a: {neon_type[1]}", "b: {neon_type[2]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vqrdmulh, 'LANE = 1']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [sqrdmulh, 'LANE = 1']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vqrdmulh, 'LANE = 1']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sqrdmulh, 'LANE = 1']]}]] - FnCall: 
[rustc_legacy_const_generics, ['2']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const LANE: i32'] safety: safe types: @@ -11274,11 +11151,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {type[1]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vqrdmulh]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [sqrdmulh]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vqrdmulh]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sqrdmulh]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int16x4_t, "i16"] @@ -11296,11 +11173,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vcgt.f32"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [fcmgt]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, 
['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vcgt.f32"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fcmgt]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [float32x2_t, uint32x2_t] @@ -11314,8 +11191,8 @@ intrinsics: return_type: "{neon_type[1]}" attr: - *neon-v8 - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vcgt.f16"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [fcmgt]]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vcgt.f16"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fcmgt]]}]] - *neon-fp16 - *neon-unstable-f16 safety: safe @@ -11332,8 +11209,8 @@ intrinsics: return_type: "{neon_type[1]}" attr: - *neon-v8 - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vclt.f16"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [fcmlt]]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vclt.f16"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fcmlt]]}]] - *neon-fp16 - *neon-unstable-f16 safety: safe @@ -11352,11 +11229,11 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, 
['"vabdl.{neon_type[0]}"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [uabdl]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vabdl.{neon_type[0]}"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [uabdl]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [uint8x8_t, uint16x8_t] @@ -11370,12 +11247,12 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}"] return_type: "{neon_type[2]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vmull, 'LANE = 1']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [smull, 'LANE = 1']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vmull, 'LANE = 1']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [smull, 'LANE = 1']]}]] - FnCall: [rustc_legacy_const_generics, ['2']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const LANE: i32'] safety: safe types: @@ -11395,12 +11272,12 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}"] 
return_type: "{neon_type[2]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vmull, 'LANE = 1']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [umull, 'LANE = 1']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vmull, 'LANE = 1']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [umull, 'LANE = 1']]}]] - FnCall: [rustc_legacy_const_generics, ['2']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const LANE: i32'] safety: safe types: @@ -11421,10 +11298,10 @@ intrinsics: return_type: "{neon_type[0]}" attr: - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "vfp4"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vfms]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [fmls]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vfms]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fmls]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - 
[float32x2_t, "f32"] @@ -11444,8 +11321,8 @@ intrinsics: attr: - *neon-v8 - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "vfp4"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [nop]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [fmls]]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fmls]]}]] - *neon-fp16 - *neon-unstable-f16 safety: safe @@ -11461,12 +11338,12 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}"] return_type: "{neon_type[0]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vqdmulh, 'LANE = 0']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [sqdmulh, 'LANE = 0']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vqdmulh, 'LANE = 0']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sqdmulh, 'LANE = 0']]}]] - FnCall: [rustc_legacy_const_generics, ['2']] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable static_defs: ['const LANE: i32'] safety: safe types: @@ -11488,11 +11365,11 @@ intrinsics: arguments: ["a: {neon_type}"] return_type: "{neon_type}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: 
[cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vrecpe]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [urecpe]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vrecpe]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [urecpe]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - uint32x2_t @@ -11511,11 +11388,11 @@ intrinsics: arguments: ["a: {neon_type}"] return_type: "{neon_type}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vrsqrte]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [ursqrte]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vrsqrte]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ursqrte]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - uint32x2_t @@ -11534,11 +11411,11 @@ intrinsics: arguments: ["a: {neon_type}"] return_type: "{neon_type}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - 
FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vrsqrte]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [frsqrte]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vrsqrte]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [frsqrte]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - float32x2_t @@ -11560,8 +11437,8 @@ intrinsics: attr: - *neon-v8 - *neon-fp16 - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vrsqrte]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [frsqrte]]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vrsqrte]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [frsqrte]]}]] - *neon-unstable-f16 safety: safe types: @@ -11586,7 +11463,7 @@ intrinsics: - *enable-v7 - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [vqshlu, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']] + - *neon-arm-unstable static_defs: ['const N: i32'] safety: safe types: @@ -11618,7 +11495,7 @@ intrinsics: - FnCall: [cfg, [{FnCall: [not, ['target_arch = "arm"']]}]] - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [sqshlu, 'N = 2']]}]] - FnCall: [rustc_legacy_const_generics, ['1']] - - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + - *neon-stable 
static_defs: ['const N: i32'] safety: safe types: @@ -11647,11 +11524,11 @@ intrinsics: arguments: ["a: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vcvt]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [fcvtzs]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vcvt]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fcvtzs]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [float32x2_t, int32x2_t] @@ -11672,8 +11549,8 @@ intrinsics: return_type: "{neon_type[1]}" attr: - *neon-v7 - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vcvt]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [fcvtzs]]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vcvt]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fcvtzs]]}]] - *neon-fp16 - *neon-unstable-f16 safety: safe @@ -11690,11 +11567,11 @@ intrinsics: arguments: ["a: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vqmovn]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, 
['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [uqxtn]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vqmovn]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [uqxtn]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [uint16x8_t, uint8x8_t] @@ -11714,11 +11591,11 @@ intrinsics: arguments: ["a: {neon_type}", "b: {neon_type}"] return_type: "{neon_type}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vcge.{neon_type}"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [cmhs]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vcge.{neon_type}"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [cmhs]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - uint8x8_t @@ -11735,10 +11612,10 @@ intrinsics: arguments: ["a: {type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg, ['target_arch = "arm"']] - - FnCall: [target_feature, ['enable = "neon,v7"']] + - *target-is-arm + - *enable-v7 - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [vld4]]}]] - - FnCall: 
[unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']] + - *neon-arm-unstable safety: unsafe: [neon] types: @@ -11768,7 +11645,7 @@ intrinsics: attr: - FnCall: [cfg, [{FnCall: [not, ['target_arch = "arm"']]}]] - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [ld4r]]}]] - - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + - *neon-stable safety: unsafe: [neon] types: @@ -11796,10 +11673,10 @@ intrinsics: arguments: ["a: {type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg, ['target_arch = "arm"']] - - FnCall: [target_feature, ['enable = "neon,v7"']] + - *target-is-arm + - *enable-v7 - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [nop]]}]] - - FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']] + - *neon-arm-unstable safety: unsafe: [neon] types: @@ -11820,11 +11697,11 @@ intrinsics: arguments: ["a: {type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [vld4]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [ld4r]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vld4]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ld4r]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: unsafe: [neon] types: @@ -11848,11 +11725,11 @@ intrinsics: arguments: ["a: {type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: 
[target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [nop]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [ld4r]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ld4r]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: unsafe: [neon] types: @@ -11867,12 +11744,12 @@ intrinsics: arguments: ["a: {type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [target_feature, ['enable = "neon,aes"']] - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v8"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [nop]]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [ld4r]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] + - *neon-aes + - *neon-v8 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ld4r]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: unsafe: [neon] types: @@ -11888,10 +11765,10 @@ intrinsics: arguments: ["a: {type[1]}", "b: {type[2]}"] return_type: 
"{neon_type[3]}" attr: - - FnCall: [cfg, ['target_arch = "arm"']] - - FnCall: [target_feature, ['enable = "neon,v7"']] + - *target-is-arm + - *enable-v7 # - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vld1]]}]] - - FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']] + - *neon-arm-unstable safety: unsafe: [neon] types: @@ -11920,8 +11797,8 @@ intrinsics: arguments: ["a: {type[1]}", "b: {type[2]}"] return_type: "{neon_type[3]}" attr: - - FnCall: [cfg, ['target_arch = "arm"']] - - FnCall: [target_feature, ['enable = "neon,v7"']] + - *target-is-arm + - *enable-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop]]}]] - *neon-fp16 - *neon-unstable-f16 @@ -11946,9 +11823,9 @@ intrinsics: safety: unsafe: [neon] attr: - - FnCall: [cfg, ['target_arch = "arm"']] - - FnCall: [target_feature, ['enable = "neon,v7"']] - - *neon-unstable + - *target-is-arm + - *enable-v7 + - *neon-arm-unstable - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['{type[2]}']]}]] types: - ['*const i8', int8x8_t, '"vld1.8"', 'crate::mem::align_of::() as i32', '_v8i8'] @@ -11972,9 +11849,9 @@ intrinsics: safety: unsafe: [neon] attr: - - FnCall: [cfg, ['target_arch = "arm"']] + - *target-is-arm - FnCall: [target_feature, ['enable = "{type[3]}"']] - - *neon-unstable + - *neon-arm-unstable - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['{type[2]}']]}]] types: - ['*const u8', uint8x8_t, '"vld1.8"', 'neon,v7', 'crate::mem::align_of::() as i32', '_v8i8'] @@ -12007,7 +11884,7 @@ intrinsics: safety: unsafe: [neon] attr: - - FnCall: [cfg, ['target_arch = "arm"']] + - *target-is-arm - FnCall: [target_feature, ['enable = "{type[3]}"']] - *neon-fp16 - *neon-unstable-f16 @@ -12030,9 +11907,9 @@ intrinsics: safety: unsafe: [neon] attr: - - FnCall: [cfg, ['target_arch = "arm"']] - - FnCall: [target_feature, ['enable = "neon,aes"']] - - *neon-unstable + - *target-is-arm + - *neon-aes + - *neon-arm-unstable - FnCall: [cfg_attr, 
[*test-is-arm, {FnCall: [assert_instr, ['vldr']]}]] types: - ['*const p64', poly64x1_t] @@ -12050,7 +11927,7 @@ intrinsics: attr: - *enable-v7 - *target-is-arm - - *neon-unstable + - *neon-arm-unstable assert_instr: [vtbx] safety: safe types: @@ -12069,7 +11946,7 @@ intrinsics: attr: - *enable-v7 - *target-is-arm - - *neon-unstable + - *neon-arm-unstable assert_instr: [vtbx] safety: safe types: @@ -12084,7 +11961,7 @@ intrinsics: attr: - *enable-v7 - *target-is-arm - - *neon-unstable + - *neon-arm-unstable assert_instr: [vtbx] safety: safe types: @@ -12107,7 +11984,7 @@ intrinsics: attr: - *enable-v7 - *target-is-arm - - *neon-unstable + - *neon-arm-unstable assert_instr: [vtbx] safety: safe types: @@ -12126,7 +12003,7 @@ intrinsics: attr: - *enable-v7 - *target-is-arm - - *neon-unstable + - *neon-arm-unstable assert_instr: [vtbx] safety: safe types: @@ -12141,7 +12018,7 @@ intrinsics: attr: - *enable-v7 - *target-is-arm - - *neon-unstable + - *neon-arm-unstable assert_instr: [vtbx] safety: safe types: @@ -12165,7 +12042,7 @@ intrinsics: attr: - *enable-v7 - *target-is-arm - - *neon-unstable + - *neon-arm-unstable assert_instr: [vtbx] safety: safe types: @@ -12184,7 +12061,7 @@ intrinsics: attr: - *enable-v7 - *target-is-arm - - *neon-unstable + - *neon-arm-unstable assert_instr: [vtbx] safety: safe types: @@ -12199,7 +12076,7 @@ intrinsics: attr: - *enable-v7 - *target-is-arm - - *neon-unstable + - *neon-arm-unstable assert_instr: [vtbx] safety: safe types: @@ -12224,7 +12101,7 @@ intrinsics: attr: - *enable-v7 - *target-is-arm - - *neon-unstable + - *neon-arm-unstable assert_instr: [vtbx] safety: safe types: @@ -12243,7 +12120,7 @@ intrinsics: attr: - *enable-v7 - *target-is-arm - - *neon-unstable + - *neon-arm-unstable assert_instr: [vtbx] safety: safe types: @@ -12268,7 +12145,7 @@ intrinsics: attr: - *enable-v7 - *target-is-arm - - *neon-unstable + - *neon-arm-unstable assert_instr: [vtbx] safety: safe types: @@ -12493,8 +12370,8 @@ intrinsics: attr: - 
*neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [float32x2_t, float32x4_t, '[0, 1, 2, 3]'] @@ -12520,7 +12397,7 @@ intrinsics: - FnCall: [target_feature, ['enable = "aes"']] - *neon-v8 - FnCall: [cfg_attr, [test, { FnCall: [assert_instr, [aese]] }]] - - *neon-unstable-is-arm + - *neon-cfg-arm-unstable - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]] safety: safe types: @@ -12542,7 +12419,7 @@ intrinsics: - FnCall: [target_feature, ['enable = "aes"']] - *neon-v8 - FnCall: [cfg_attr, [test, { FnCall: [assert_instr, [aesd]] }]] - - *neon-unstable-is-arm + - *neon-cfg-arm-unstable - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]] safety: safe types: @@ -12564,7 +12441,7 @@ intrinsics: - FnCall: [target_feature, ['enable = "aes"']] - *neon-v8 - FnCall: [cfg_attr, [test, { FnCall: [assert_instr, ["{type[1]}"]] }]] - - *neon-unstable-is-arm + - *neon-cfg-arm-unstable - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]] safety: safe types: @@ -12586,7 +12463,7 @@ intrinsics: - FnCall: [target_feature, ['enable = "aes"']] - *neon-v8 - FnCall: [cfg_attr, [test, { FnCall: [assert_instr, ["{type[1]}"]] }]] - - *neon-unstable-is-arm + - *neon-cfg-arm-unstable - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]] safety: safe types: @@ -12608,7 +12485,7 @@ intrinsics: - FnCall: [target_feature, ['enable = "sha2"']] - *neon-v8 - FnCall: [cfg_attr, [test, { FnCall: [assert_instr, ["{type[1]}"]] }]] - - *neon-unstable-is-arm + - *neon-cfg-arm-unstable - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 
'since = "1.72.0"']] }]] safety: safe types: @@ -12630,7 +12507,7 @@ intrinsics: - FnCall: [target_feature, ['enable = "sha2"']] - *neon-v8 - FnCall: [cfg_attr, [test, { FnCall: [assert_instr, ["{type[1]}"]] }]] - - *neon-unstable-is-arm + - *neon-cfg-arm-unstable - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]] safety: safe types: @@ -12652,7 +12529,7 @@ intrinsics: - FnCall: [target_feature, ['enable = "sha2"']] - *neon-v8 - FnCall: [cfg_attr, [test, { FnCall: [assert_instr, ["{type[1]}"]] }]] - - *neon-unstable-is-arm + - *neon-cfg-arm-unstable - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]] safety: safe types: @@ -12674,7 +12551,7 @@ intrinsics: - FnCall: [target_feature, ['enable = "sha2"']] - *neon-v8 - FnCall: [cfg_attr, [test, { FnCall: [assert_instr, ["{type[1]}"]] }]] - - *neon-unstable-is-arm + - *neon-cfg-arm-unstable - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]] safety: safe types: @@ -12696,7 +12573,7 @@ intrinsics: - FnCall: [target_feature, ['enable = "sha2"']] - *neon-v8 - FnCall: [cfg_attr, [test, { FnCall: [assert_instr, ["{type[1]}"]] }]] - - *neon-unstable-is-arm + - *neon-cfg-arm-unstable - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]] safety: safe types: @@ -12718,7 +12595,7 @@ intrinsics: - FnCall: [target_feature, ['enable = "sha2"']] - *neon-v8 - FnCall: [cfg_attr, [test, { FnCall: [assert_instr, ["{type[1]}"]] }]] - - *neon-unstable-is-arm + - *neon-cfg-arm-unstable - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]] safety: safe types: @@ -12740,7 +12617,7 @@ intrinsics: - FnCall: [target_feature, ['enable = "sha2"']] - *neon-v8 - FnCall: [cfg_attr, [test, { FnCall: 
[assert_instr, ["{type[1]}"]] }]] - - *neon-unstable-is-arm + - *neon-cfg-arm-unstable - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]] safety: safe types: @@ -12762,7 +12639,7 @@ intrinsics: - FnCall: [target_feature, ['enable = "sha2"']] - *neon-v8 - FnCall: [cfg_attr, [test, { FnCall: [assert_instr, ["{type[1]}"]] }]] - - *neon-unstable-is-arm + - *neon-cfg-arm-unstable - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]] safety: safe types: @@ -12784,7 +12661,7 @@ intrinsics: - FnCall: [target_feature, ['enable = "sha2"']] - *neon-v8 - FnCall: [cfg_attr, [test, { FnCall: [assert_instr, ["{type[1]}"]] }]] - - *neon-unstable-is-arm + - *neon-cfg-arm-unstable - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]] safety: safe types: @@ -12806,7 +12683,7 @@ intrinsics: - FnCall: [target_feature, ['enable = "sha2"']] - *neon-v8 - FnCall: [cfg_attr, [test, { FnCall: [assert_instr, ["{type[1]}"]] }]] - - *neon-unstable-is-arm + - *neon-cfg-arm-unstable - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]] safety: safe types: @@ -12974,7 +12851,7 @@ intrinsics: return_type: "{type[0]}" attr: - FnCall: [target_feature, ['enable = "crc"']] - - FnCall: [cfg, ['target_arch = "arm"']] + - *target-is-arm - FnCall: [cfg_attr, [test, { FnCall: [assert_instr, ["crc32w"]] }]] - *arm-crc-unstable safety: safe @@ -12994,7 +12871,7 @@ intrinsics: return_type: "{type[0]}" attr: - FnCall: [target_feature, ['enable = "crc"']] - - FnCall: [cfg, ['target_arch = "arm"']] + - *target-is-arm - FnCall: [cfg_attr, [test, { FnCall: [assert_instr, ["crc32cw"]] }]] - *arm-crc-unstable safety: safe @@ -13013,8 +12890,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vabs]]}]] - 
FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [abs]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - int8x8_t @@ -13040,8 +12917,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vpmin]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sminp]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - int8x8_t @@ -13064,8 +12941,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vpmin]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [uminp]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - uint8x8_t @@ -13088,8 +12965,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vpmin]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fminp]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - float32x2_t @@ -13110,8 +12987,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vpmax]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [smaxp]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - int8x8_t @@ -13134,8 +13011,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vpmax]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [umaxp]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - uint8x8_t @@ -13158,8 +13035,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, 
[*test-is-arm, {FnCall: [assert_instr, [vpmax]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fmaxp]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - float32x2_t @@ -13180,8 +13057,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"{type[2]}"']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [raddhn]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int16x8_t, int8x8_t, 'vraddhn.i16'] @@ -13204,8 +13081,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"{type[2]}"']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [raddhn]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [uint16x8_t, uint8x8_t, 'vraddhn.i16', int16x8_t] @@ -13227,8 +13104,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"{type[3]}"']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [raddhn2]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [uint8x8_t , uint16x8_t, uint8x16_t, 'vraddhn.i16', int16x8_t, '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]'] @@ -13254,8 +13131,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"{type[3]}"']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [raddhn2]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [int8x8_t , int16x8_t, int8x16_t, 'vraddhn.i16', '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]'] @@ -13278,8 +13155,8 @@ intrinsics: - 
*neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vpadd]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [addp]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - int8x8_t @@ -13302,8 +13179,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vpadd]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [addp]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable safety: safe types: - [uint8x8_t, int8x8_t] @@ -13327,7 +13204,7 @@ intrinsics: attr: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['{type[2]}']]}]] - - *neon-unstable-is-arm + - *neon-cfg-arm-unstable types: - [int16x4_t, int8x8_t, '"vpadal.s8"'] - [int32x2_t, int16x4_t, '"vpadal.s16"'] @@ -13352,7 +13229,7 @@ intrinsics: attr: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['{type[2]}']]}]] - - *neon-unstable-is-arm + - *neon-cfg-arm-unstable types: - [uint16x4_t, uint8x8_t , '"vpadal.u8"'] - [uint32x2_t, uint16x4_t, '"vpadal.u16"'] @@ -13376,8 +13253,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['{type[2]}']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [saddlp]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable types: - [int8x8_t, int16x4_t , '"vpaddl.s8"'] - [int16x4_t, int32x2_t, '"vpaddl.s16"'] @@ -13403,8 +13280,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['{type[2]}']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [uaddlp]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable types: - [uint8x8_t, uint16x4_t , '"vpaddl.u8"'] - [uint16x4_t, uint32x2_t, 
'"vpaddl.u16"'] @@ -13430,8 +13307,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"{type[2]}"']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sadalp]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable types: - [int16x4_t, int8x8_t, 'vpadal.s8', 'let x: int16x4_t; #[cfg(target_arch = "arm")] { x = priv_vpadal_s8(a, b); } #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] unsafe { x = simd_add(vpaddl_s8(b), a);}'] - [int32x2_t, int16x4_t, 'vpadal.s16', 'let x: int32x2_t; #[cfg(target_arch = "arm")] { x = priv_vpadal_s16(a, b); } #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] unsafe { x = simd_add(vpaddl_s16(b), a);}'] @@ -13452,8 +13329,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"{type[2]}"']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [uadalp]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable types: - [uint16x4_t, uint8x8_t, 'vpadal.u8', 'let x: uint16x4_t; #[cfg(target_arch = "arm")] { x = priv_vpadal_u8(a, b); } #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] unsafe { x = simd_add(vpaddl_u8(b), a);}'] - [uint32x2_t, uint16x4_t, 'vpadal.u16', 'let x: uint32x2_t; #[cfg(target_arch = "arm")] { x = priv_vpadal_u16(a, b); } #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] unsafe { x = simd_add(vpaddl_u16(b), a);}'] @@ -13474,8 +13351,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vcnt]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [cnt]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable types: - int8x8_t - int8x16_t @@ -13497,8 +13374,8 @@ intrinsics: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: 
[assert_instr, [vcnt]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [cnt]]}]] - - *neon-stable-not-arm - - *neon-unstable-is-arm + - *neon-not-arm-stable + - *neon-cfg-arm-unstable types: - [uint8x8_t, int8x8_t] - [uint8x16_t, int8x16_t] @@ -13524,7 +13401,7 @@ intrinsics: - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [smmla]]}]] - *neon-unstable-i8mm - - *neon-unstable-is-arm + - *neon-cfg-arm-unstable types: - [int32x4_t, int8x16_t] compose: @@ -13547,7 +13424,7 @@ intrinsics: - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ummla]]}]] - *neon-unstable-i8mm - - *neon-unstable-is-arm + - *neon-cfg-arm-unstable types: - [uint32x4_t, uint8x16_t] compose: @@ -13570,7 +13447,7 @@ intrinsics: - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [usmmla]]}]] - *neon-unstable-i8mm - - *neon-unstable-is-arm + - *neon-cfg-arm-unstable types: - [int32x4_t, uint8x16_t, int8x16_t] compose: @@ -13591,7 +13468,7 @@ intrinsics: attr: - *target-is-arm - *neon-v7 - - *neon-unstable + - *neon-arm-unstable assert_instr: [vtbl] types: - int8x8_t @@ -13610,7 +13487,7 @@ intrinsics: attr: - *target-is-arm - *neon-v7 - - *neon-unstable + - *neon-arm-unstable assert_instr: [vtbl] types: - int8x8_t @@ -13625,7 +13502,7 @@ intrinsics: attr: - *target-is-arm - *neon-v7 - - *neon-unstable + - *neon-arm-unstable assert_instr: [vtbl] types: - [uint8x8_t, uint8x8_t] @@ -13647,7 +13524,7 @@ intrinsics: attr: - *target-is-arm - *neon-v7 - - *neon-unstable + - *neon-arm-unstable assert_instr: [vtbl] types: - int8x8_t @@ -13666,7 +13543,7 @@ intrinsics: attr: - *target-is-arm - *neon-v7 - - *neon-unstable + - *neon-arm-unstable assert_instr: [vtbl] types: - [int8x8x2_t, int8x8_t] 
@@ -13681,7 +13558,7 @@ intrinsics: attr: - *target-is-arm - *neon-v7 - - *neon-unstable + - *neon-arm-unstable assert_instr: [vtbl] types: - [uint8x8x2_t, uint8x8_t] @@ -13704,7 +13581,7 @@ intrinsics: attr: - *target-is-arm - *neon-v7 - - *neon-unstable + - *neon-arm-unstable assert_instr: [vtbl] types: - int8x8_t @@ -13723,7 +13600,7 @@ intrinsics: attr: - *target-is-arm - *neon-v7 - - *neon-unstable + - *neon-arm-unstable assert_instr: [vtbl] types: - [int8x8x3_t, int8x8_t] @@ -13738,7 +13615,7 @@ intrinsics: attr: - *target-is-arm - *neon-v7 - - *neon-unstable + - *neon-arm-unstable assert_instr: [vtbl] types: - [uint8x8x3_t, uint8x8_t] @@ -13762,7 +13639,7 @@ intrinsics: attr: - *target-is-arm - *neon-v7 - - *neon-unstable + - *neon-arm-unstable assert_instr: [vtbl] types: - int8x8_t @@ -13781,7 +13658,7 @@ intrinsics: attr: - *target-is-arm - *neon-v7 - - *neon-unstable + - *neon-arm-unstable assert_instr: [vtbl] types: - [int8x8x4_t, int8x8_t] @@ -13796,7 +13673,7 @@ intrinsics: attr: - *target-is-arm - *neon-v7 - - *neon-unstable + - *neon-arm-unstable assert_instr: [vtbl] types: - [uint8x8x4_t, uint8x8_t] @@ -13821,7 +13698,7 @@ intrinsics: attr: - *target-is-arm - *neon-v7 - - *neon-unstable + - *neon-arm-unstable - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vst1.{type[4]}"']]}]] types: - ['_v8i8', '* const i8', int8x8_t, i32, '8'] @@ -13871,7 +13748,7 @@ intrinsics: attr: - *target-is-arm - *neon-v7 - - *neon-unstable + - *neon-arm-unstable - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vst1.{type[2]}"']]}]] types: - ['*mut i8', int8x8_t, '8', 'a', 'crate::mem::align_of::() as i32', '_v8i8'] @@ -13937,7 +13814,7 @@ intrinsics: attr: #- *target-is-arm #- *neon-v7 - - *neon-unstable + - *neon-arm-unstable types: - ['_v8i8', "int8x8_t", '8'] - ['_v16i8', 'int8x16_t', '8'] @@ -13962,7 +13839,7 @@ intrinsics: attr: - *target-is-arm - FnCall: [target_feature, ['enable = "{type[1]}"']] - - *neon-unstable + - *neon-arm-unstable 
- FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vsri.{type[2]}"', 'N = 1']]}]] - FnCall: [rustc_legacy_const_generics, ['2']] static_defs: ['const N: i32'] @@ -13998,9 +13875,9 @@ intrinsics: return_type: "{neon_type[0]}" static_defs: ['const N: i32'] attr: - - FnCall: [target_feature, ['enable = "neon,v7"']] + - *enable-v7 - *target-is-arm - - *neon-unstable + - *neon-arm-unstable - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vsri.{type[1]}"', 'N = 1']]}]] - FnCall: [rustc_legacy_const_generics, ['2']] safety: safe @@ -14029,7 +13906,7 @@ intrinsics: attr: - *target-is-arm - FnCall: [target_feature, ['enable = "{type[1]}"']] - - *neon-unstable + - *neon-arm-unstable - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vsli.{type[2]}"', 'N = 1']]}]] - FnCall: [rustc_legacy_const_generics, ['2']] static_defs: ['const N: i32'] @@ -14066,8 +13943,8 @@ intrinsics: safety: safe attr: - *target-is-arm - - FnCall: [target_feature, ['enable = "neon,v7"']] - - *neon-unstable + - *enable-v7 + - *neon-arm-unstable - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vsli.{type[1]}"', 'N = 1']]}]] - FnCall: [rustc_legacy_const_generics, ['2']] static_defs: ['const N: i32'] @@ -14126,8 +14003,8 @@ intrinsics: attr: - *neon-v7 - *neon-fp16 - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, [nop, 'LANE = 0']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [nop, 'LANE = 0']]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop, 'LANE = 0']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [nop, 'LANE = 0']]}]] - FnCall: [rustc_legacy_const_generics, ["1"]] - *neon-unstable-f16 static_defs: ['const LANE: i32'] @@ -14145,8 +14022,8 @@ intrinsics: return_type: "{neon_type[0]}" attr: - *neon-v7 - - FnCall: [cfg_attr, [{FnCall: [all, [test, 
'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vdup.16"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [dup]]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vdup.16"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [dup]]}]] - *neon-fp16 - *neon-unstable-f16 safety: safe @@ -14155,3 +14032,1065 @@ intrinsics: - [float16x8_t, f16] compose: - FnCall: ["vdup{neon_type[0].N}", [a]] + + - name: "{type[0]}" + doc: "Load one single-element structure to one lane of one register." + arguments: ["ptr: {type[1]}", "src: {neon_type[2]}"] + return_type: "{neon_type[2]}" + static_defs: ['const LANE: i32'] + attr: + - *neon-v7 + - FnCall: [rustc_legacy_const_generics, ['2']] + - FnCall: [cfg_attr, [*test-is-arm, { FnCall: [assert_instr, ["{type[3]}", 'LANE = {type[4]}']] } ]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, { FnCall: [assert_instr, ['{type[5]}', 'LANE = {type[4]}']]}] ] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable + safety: + unsafe: [neon] + types: + - ['vld1_lane_s8', '*const i8', 'int8x8_t', '"vld1.8"', '7', 'ld1', 'static_assert_uimm_bits!', 'LANE, 3'] + - ['vld1_lane_u8', '*const u8', 'uint8x8_t', '"vld1.8"', '7', 'ld1', 'static_assert_uimm_bits!', 'LANE, 3'] + - ['vld1_lane_p8', '*const p8', 'poly8x8_t', '"vld1.8"', '7', 'ld1', 'static_assert_uimm_bits!', 'LANE, 3'] + - ['vld1q_lane_s8', '*const i8', 'int8x16_t', '"vld1.8"', '15', 'ld1', 'static_assert_uimm_bits!', 'LANE, 4'] + - ['vld1q_lane_u8', '*const u8', 'uint8x16_t', '"vld1.8"', '15', 'ld1', 'static_assert_uimm_bits!', 'LANE, 4'] + - ['vld1q_lane_p8', '*const p8', 'poly8x16_t', '"vld1.8"', '15', 'ld1', 'static_assert_uimm_bits!', 'LANE, 4'] + - ['vld1_lane_s16', '*const i16', 'int16x4_t', '"vld1.16"', '3', 'ld1', 'static_assert_uimm_bits!', 'LANE, 2'] + - ['vld1_lane_u16', '*const u16', 'uint16x4_t', '"vld1.16"', '3', 'ld1', 
'static_assert_uimm_bits!', 'LANE, 2'] + - ['vld1_lane_p16', '*const p16', 'poly16x4_t', '"vld1.16"', '3', 'ld1', 'static_assert_uimm_bits!', 'LANE, 2'] + - ['vld1q_lane_s16', '*const i16', 'int16x8_t', '"vld1.16"', '7', 'ld1', 'static_assert_uimm_bits!', 'LANE, 3'] + - ['vld1q_lane_u16', '*const u16', 'uint16x8_t', '"vld1.16"', '7', 'ld1', 'static_assert_uimm_bits!', 'LANE, 3'] + - ['vld1q_lane_p16', '*const p16', 'poly16x8_t', '"vld1.16"', '7', 'ld1', 'static_assert_uimm_bits!', 'LANE, 3'] + - ['vld1_lane_s32', '*const i32', 'int32x2_t', '"vld1.32"', '1', 'ld1', 'static_assert_uimm_bits!', 'LANE, 1'] + - ['vld1_lane_u32', '*const u32', 'uint32x2_t', '"vld1.32"', '1', 'ld1', 'static_assert_uimm_bits!', 'LANE, 1'] + - ['vld1_lane_f32', '*const f32', 'float32x2_t', '"vld1.32"', '1', 'ld1', 'static_assert_uimm_bits!', 'LANE, 1'] + - ['vld1q_lane_s32', '*const i32', 'int32x4_t', '"vld1.32"', '3', 'ld1', 'static_assert_uimm_bits!', 'LANE, 2'] + - ['vld1q_lane_u32', '*const u32', 'uint32x4_t', '"vld1.32"', '3', 'ld1', 'static_assert_uimm_bits!', 'LANE, 2'] + - ['vld1q_lane_f32', '*const f32', 'float32x4_t', '"vld1.32"', '3', 'ld1', 'static_assert_uimm_bits!', 'LANE, 2'] + - ['vld1_lane_s64', '*const i64', 'int64x1_t', 'vldr', '0', 'ldr', 'static_assert!', 'LANE == 0'] + - ['vld1_lane_u64', '*const u64', 'uint64x1_t', 'vldr', '0', 'ldr', 'static_assert!', 'LANE == 0'] + - ['vld1q_lane_s64', '*const i64', 'int64x2_t', 'vldr', '1', 'ld1', 'static_assert_uimm_bits!', 'LANE, 1'] + - ['vld1q_lane_u64', '*const u64', 'uint64x2_t', 'vldr', '1', 'ld1', 'static_assert_uimm_bits!', 'LANE, 1'] + compose: + - FnCall: ["{type[6]}", ["{type[7]}"]] + - FnCall: [simd_insert!, [src, 'LANE as u32', '*ptr']] + + - name: "{type[0]}" + doc: "Load one single-element structure to one lane of one register." 
+ arguments: ["ptr: {type[1]}", "src: {neon_type[2]}"] + return_type: "{neon_type[2]}" + attr: + - *neon-aes + - *neon-v7 + - FnCall: [rustc_legacy_const_generics, ['2']] + - FnCall: [cfg_attr, [*test-is-arm, { FnCall: [assert_instr, ["{type[3]}", 'LANE = {type[4]}']] } ]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, { FnCall: [assert_instr, ['{type[5]}', 'LANE = {type[4]}']]}] ] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable + static_defs: ['const LANE: i32'] + safety: + unsafe: [neon] + types: + - ['vld1_lane_p64', '*const p64', 'poly64x1_t', 'vldr', '0', 'ldr', 'static_assert!', 'LANE == 0'] + - ['vld1q_lane_p64', '*const p64', 'poly64x2_t', 'vldr', '1', 'ld1', 'static_assert_uimm_bits!', 'LANE, 1'] + compose: + - FnCall: ["{type[6]}", ["{type[7]}"]] + - FnCall: [simd_insert!, [src, 'LANE as u32', '*ptr']] + + - name: "{type[0]}" + doc: "Load one single-element structure and Replicate to all lanes (of one register)." + arguments: ["ptr: {type[1]}"] + return_type: "{neon_type[2]}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, { FnCall: [assert_instr, ["{type[3]}"]] } ]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, { FnCall: [assert_instr, ['{type[4]}']]}] ] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable + safety: + unsafe: [neon] + types: + - ['vld1_dup_s64', '*const i64', 'int64x1_t', 'vldr', 'ldr', 'let x: int64x1_t; #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] { x = crate::core_arch::aarch64::vld1_s64(ptr); } #[cfg(target_arch = "arm")] { x = crate::core_arch::arm::vld1_s64(ptr); }'] + - ['vld1_dup_u64', '*const u64', 'uint64x1_t', 'vldr', 'ldr', 'let x: uint64x1_t; #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] { x = crate::core_arch::aarch64::vld1_u64(ptr); } #[cfg(target_arch = "arm")] { x = crate::core_arch::arm::vld1_u64(ptr); }'] + compose: + - Identifier: ['{type[5]}', Symbol] + - Identifier: [x, Symbol] + + - name: "{type[0]}" + doc: "Load one single-element structure and 
Replicate to all lanes (of one register)." + arguments: ["ptr: {type[1]}"] + return_type: "{neon_type[2]}" + attr: + - *neon-aes + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, { FnCall: [assert_instr, ["{type[3]}"]] } ]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, { FnCall: [assert_instr, ['{type[4]}']]}] ] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable + safety: + unsafe: [neon] + types: + - ['vld1_dup_p64', '*const p64', 'poly64x1_t', 'vldr', 'ldr', 'let x: poly64x1_t; #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] { x = crate::core_arch::aarch64::vld1_p64(ptr); } #[cfg(target_arch = "arm")] { x = crate::core_arch::arm::vld1_p64(ptr); }'] + compose: + - Identifier: ['{type[5]}', Symbol] + - Identifier: [x, Symbol] + + - name: "{type[0]}" + doc: "Load one single-element structure and Replicate to all lanes (of one register)." + arguments: ["ptr: {type[1]}"] + return_type: "{neon_type[2]}" + attr: + - *neon-aes + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, { FnCall: [assert_instr, ["{type[3]}"]] } ]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, { FnCall: [assert_instr, ['{type[4]}']]}] ] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable + safety: + unsafe: [neon] + types: + - ['vld1q_dup_p64', '*const p64', 'poly64x2_t', 'vldr', 'ld1r', 'vld1q_lane_p64::<0>', 'u64x2::splat(0)', '[0, 0]'] + compose: + - Let: + - x + - FnCall: + - '{type[5]}' + - - ptr + - FnCall: [transmute, ['{type[6]}']] + - FnCall: ['simd_shuffle!', [x, x, '{type[7]}']] + + - name: "{type[0]}" + doc: "Load one single-element structure and Replicate to all lanes (of one register)." 
+ arguments: ["ptr: {type[1]}"] + return_type: "{neon_type[2]}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, { FnCall: [assert_instr, ['"{type[3]}"']] } ]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, { FnCall: [assert_instr, ['{type[4]}']]}] ] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable + safety: + unsafe: [neon] + types: + - ['vld1_dup_s8', '*const i8', 'int8x8_t', 'vld1.8', 'ld1r', 'vld1_lane_s8::<0>', 'i8x8::splat(0)', '[0, 0, 0, 0, 0, 0, 0, 0]'] + - ['vld1_dup_u8', '*const u8', 'uint8x8_t', 'vld1.8', 'ld1r', 'vld1_lane_u8::<0>', 'u8x8::splat(0)', '[0, 0, 0, 0, 0, 0, 0, 0]'] + - ['vld1_dup_p8', '*const p8', 'poly8x8_t', 'vld1.8', 'ld1r', 'vld1_lane_p8::<0>', 'u8x8::splat(0)', '[0, 0, 0, 0, 0, 0, 0, 0]'] + + - ['vld1q_dup_s8', '*const i8', 'int8x16_t', 'vld1.8', 'ld1r', 'vld1q_lane_s8::<0>', 'i8x16::splat(0)', '[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]'] + - ['vld1q_dup_u8', '*const u8', 'uint8x16_t', 'vld1.8', 'ld1r', 'vld1q_lane_u8::<0>', 'u8x16::splat(0)', '[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]'] + - ['vld1q_dup_p8', '*const p8', 'poly8x16_t', 'vld1.8', 'ld1r', 'vld1q_lane_p8::<0>', 'u8x16::splat(0)', '[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]'] + + - ['vld1_dup_s16', '*const i16', 'int16x4_t', 'vld1.16', 'ld1r', 'vld1_lane_s16::<0>', 'i16x4::splat(0)', '[0, 0, 0, 0]'] + - ['vld1_dup_u16', '*const u16', 'uint16x4_t', 'vld1.16', 'ld1r', 'vld1_lane_u16::<0>', 'u16x4::splat(0)', '[0, 0, 0, 0]'] + - ['vld1_dup_p16', '*const p16', 'poly16x4_t', 'vld1.16', 'ld1r', 'vld1_lane_p16::<0>', 'u16x4::splat(0)', '[0, 0, 0, 0]'] + + - ['vld1q_dup_s16', '*const i16', 'int16x8_t', 'vld1.16', 'ld1r', 'vld1q_lane_s16::<0>', 'i16x8::splat(0)', '[0, 0, 0, 0, 0, 0, 0, 0]'] + - ['vld1q_dup_u16', '*const u16', 'uint16x8_t', 'vld1.16', 'ld1r', 'vld1q_lane_u16::<0>', 'u16x8::splat(0)', '[0, 0, 0, 0, 0, 0, 0, 0]'] + - ['vld1q_dup_p16', '*const p16', 'poly16x8_t', 'vld1.16', 'ld1r', 'vld1q_lane_p16::<0>', 'u16x8::splat(0)', '[0, 0, 
0, 0, 0, 0, 0, 0]'] + + - ['vld1_dup_s32', '*const i32', 'int32x2_t', 'vld1.32', 'ld1r', 'vld1_lane_s32::<0>', 'i32x2::splat(0)', '[0, 0]'] + - ['vld1_dup_u32', '*const u32', 'uint32x2_t', 'vld1.32', 'ld1r', 'vld1_lane_u32::<0>', 'u32x2::splat(0)', '[0, 0]'] + - ['vld1_dup_f32', '*const f32', 'float32x2_t', 'vld1.32', 'ld1r', 'vld1_lane_f32::<0>', 'f32x2::splat(0.0)', '[0, 0]'] + + - ['vld1q_dup_s32', '*const i32', 'int32x4_t', 'vld1.32', 'ld1r', 'vld1q_lane_s32::<0>', 'i32x4::splat(0)', '[0, 0, 0, 0]'] + - ['vld1q_dup_u32', '*const u32', 'uint32x4_t', 'vld1.32', 'ld1r', 'vld1q_lane_u32::<0>', 'u32x4::splat(0)', '[0, 0, 0, 0]'] + - ['vld1q_dup_f32', '*const f32', 'float32x4_t', 'vld1.32', 'ld1r', 'vld1q_lane_f32::<0>', 'f32x4::splat(0.0)', '[0, 0, 0, 0]'] + + - ['vld1q_dup_s64', '*const i64', 'int64x2_t', 'vldr', 'ld1', 'vld1q_lane_s64::<0>', 'i64x2::splat(0)', '[0, 0]'] + - ['vld1q_dup_u64', '*const u64', 'uint64x2_t', 'vldr', 'ld1', 'vld1q_lane_u64::<0>', 'u64x2::splat(0)', '[0, 0]'] + compose: + - Let: + - x + - FnCall: + - '{type[5]}' + - - ptr + - FnCall: [transmute, ['{type[6]}']] + - FnCall: ['simd_shuffle!', [x, x, '{type[7]}']] + + - name: "{type[0]}" + doc: "Absolute difference and accumulate (64-bit)" + arguments: ['a: {neon_type[1]}', 'b: {neon_type[1]}', 'c: {neon_type[1]}'] + return_type: "{neon_type[1]}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, { FnCall: [assert_instr, ['"{type[2]}"']] } ]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, { FnCall: [assert_instr, ['{type[3]}']]}] ] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable + safety: safe + types: + - ['vaba_s8', 'int8x8_t', 'vaba.s8', 'saba', 'vabd_s8'] + - ['vaba_u8', 'uint8x8_t', 'vaba.u8', 'uaba', 'vabd_u8'] + - ['vaba_s16', 'int16x4_t', 'vaba.s16', 'saba', 'vabd_s16'] + - ['vaba_u16', 'uint16x4_t', 'vaba.u16', 'uaba', 'vabd_u16'] + - ['vaba_s32', 'int32x2_t', 'vaba.s32', 'saba', 'vabd_s32'] + - ['vaba_u32', 'uint32x2_t', 'vaba.u32', 'uaba', 'vabd_u32'] + compose: 
+ - FnCall: + - 'simd_add' + - - a + - FnCall: ['{type[4]}', [b, c]] + + - name: "{type[0]}" + doc: "Absolute difference and accumulate (128-bit)" + arguments: ['a: {neon_type[1]}', 'b: {neon_type[1]}', 'c: {neon_type[1]}'] + return_type: "{neon_type[1]}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, { FnCall: [assert_instr, ['"{type[2]}"']] } ]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, { FnCall: [assert_instr, ['{type[3]}']]}] ] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable + safety: safe + types: + - ['vabaq_s8', 'int8x16_t', 'vaba.s8', 'saba', 'vabdq_s8'] + - ['vabaq_u8', 'uint8x16_t', 'vaba.u8', 'uaba', 'vabdq_u8'] + - ['vabaq_s16', 'int16x8_t', 'vaba.s16', 'saba', 'vabdq_s16'] + - ['vabaq_u16', 'uint16x8_t', 'vaba.u16', 'uaba', 'vabdq_u16'] + - ['vabaq_s32', 'int32x4_t', 'vaba.s32', 'saba', 'vabdq_s32'] + - ['vabaq_u32', 'uint32x4_t', 'vaba.u32', 'uaba', 'vabdq_u32'] + compose: + - FnCall: + - 'simd_add' + - - a + - FnCall: ['{type[4]}', [b, c]] + + - name: "{type[0]}" + doc: "Vector add." 
+ arguments: ['a: {neon_type[1]}', 'b: {neon_type[1]}'] + return_type: "{neon_type[1]}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, { FnCall: [assert_instr, ['{type[2]}']] } ]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, { FnCall: [assert_instr, ['{type[3]}']]}] ] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable + safety: safe + types: + - ['vadd_s8', 'int8x8_t', 'vadd', 'add'] + - ['vaddq_s8', 'int8x16_t', 'vadd', 'add'] + - ['vadd_s16', 'int16x4_t', 'vadd', 'add'] + - ['vaddq_s16', 'int16x8_t', 'vadd', 'add'] + - ['vadd_s32', 'int32x2_t', 'vadd', 'add'] + - ['vaddq_s32', 'int32x4_t', 'vadd', 'add'] + - ['vaddq_s64', 'int64x2_t', 'vadd', 'add'] + - ['vadd_f32', 'float32x2_t', 'vadd', 'fadd'] + - ['vaddq_f32', 'float32x4_t', 'vadd', 'fadd'] + - ['vadd_u8', 'uint8x8_t', 'vadd', 'add'] + - ['vaddq_u8', 'uint8x16_t', 'vadd', 'add'] + - ['vadd_u16', 'uint16x4_t', 'vadd', 'add'] + - ['vaddq_u16', 'uint16x8_t', 'vadd', 'add'] + - ['vadd_u32', 'uint32x2_t', 'vadd', 'add'] + - ['vaddq_u32', 'uint32x4_t', 'vadd', 'add'] + - ['vaddq_u64', 'uint64x2_t', 'vadd', 'add'] + compose: + - FnCall: ['simd_add', [a, b]] + + - name: "{type[0]}" + doc: "Add Long (vector)." 
+ arguments: ['a: {neon_type[1]}', 'b: {neon_type[1]}'] + return_type: "{neon_type[2]}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, { FnCall: [assert_instr, ['{type[3]}']] } ]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, { FnCall: [assert_instr, ['{type[4]}']]}] ] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable + safety: safe + types: + - ['vaddl_s8', 'int8x8_t', 'int16x8_t', 'vaddl', 'saddl'] + - ['vaddl_s16', 'int16x4_t', 'int32x4_t', 'vaddl', 'saddl'] + - ['vaddl_s32', 'int32x2_t', 'int64x2_t', 'vaddl', 'saddl'] + - ['vaddl_u8', 'uint8x8_t', 'uint16x8_t', 'vaddl', 'uaddl'] + - ['vaddl_u16', 'uint16x4_t', 'uint32x4_t', 'vaddl', 'uaddl'] + - ['vaddl_u32', 'uint32x2_t', 'uint64x2_t', 'vaddl', 'uaddl'] + compose: + - Let: + - a + - '{neon_type[2]}' + - FnCall: [simd_cast, [a]] + - Let: + - b + - '{neon_type[2]}' + - FnCall: [simd_cast, [b]] + - FnCall: ['simd_add', [a, b]] + + - name: "{type[0]}" + doc: "Signed Add Long (vector, high half)." + arguments: ['a: {neon_type[1]}', 'b: {neon_type[1]}'] + return_type: "{neon_type[2]}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, { FnCall: [assert_instr, ['{type[3]}']] } ]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, { FnCall: [assert_instr, ['{type[4]}']]}] ] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable + safety: safe + types: + - ['vaddl_high_s8', 'int8x16_t', 'int16x8_t', 'vaddl', 'saddl2', 'int8x8_t', '[8, 9, 10, 11, 12, 13, 14, 15]'] + - ['vaddl_high_s16', 'int16x8_t', 'int32x4_t', 'vaddl', 'saddl2', 'int16x4_t', '[4, 5, 6, 7]'] + - ['vaddl_high_s32', 'int32x4_t', 'int64x2_t', 'vaddl', 'saddl2', 'int32x2_t', '[2, 3]'] + - ['vaddl_high_u8', 'uint8x16_t', 'uint16x8_t', 'vaddl', 'uaddl2', 'uint8x8_t', '[8, 9, 10, 11, 12, 13, 14, 15]'] + - ['vaddl_high_u16', 'uint16x8_t', 'uint32x4_t', 'vaddl', 'uaddl2', 'uint16x4_t', '[4, 5, 6, 7]'] + - ['vaddl_high_u32', 'uint32x4_t', 'uint64x2_t', 'vaddl', 'uaddl2', 'uint32x2_t', '[2, 3]'] + compose: + - Let: + - a + - 
'{neon_type[5]}' + - FnCall: ['simd_shuffle!', [a, a, '{type[6]}']] + - Let: + - b + - '{neon_type[5]}' + - FnCall: ['simd_shuffle!', [b, b, '{type[6]}']] + - Let: [a, '{neon_type[2]}', {FnCall: [simd_cast, [a]]}] + - Let: [b, '{neon_type[2]}', {FnCall: [simd_cast, [b]]}] + - FnCall: [simd_add, [a, b]] + + - name: "{type[0]}" + doc: "Add Wide" + arguments: ['a: {neon_type[1]}', 'b: {neon_type[2]}'] + return_type: "{neon_type[1]}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, { FnCall: [assert_instr, ['{type[3]}']] } ]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, { FnCall: [assert_instr, ['{type[4]}']]}] ] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable + safety: safe + types: + - ['vaddw_s8', 'int16x8_t', 'int8x8_t', 'vaddw', 'saddw'] + - ['vaddw_s16', 'int32x4_t', 'int16x4_t', 'vaddw', 'saddw'] + - ['vaddw_s32', 'int64x2_t', 'int32x2_t', 'vaddw', 'saddw'] + - ['vaddw_u8', 'uint16x8_t', 'uint8x8_t', 'vaddw', 'uaddw'] + - ['vaddw_u16', 'uint32x4_t', 'uint16x4_t', 'vaddw', 'uaddw'] + - ['vaddw_u32', 'uint64x2_t', 'uint32x2_t', 'vaddw', 'uaddw'] + compose: + - Let: + - b + - '{neon_type[1]}' + - FnCall: ['simd_cast', [b]] + - FnCall: [simd_add, [a, b]] + + - name: "{type[0]}" + doc: "Add Wide (high half)." 
+ arguments: ['a: {neon_type[1]}', 'b: {neon_type[2]}'] + return_type: "{neon_type[1]}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, { FnCall: [assert_instr, ['{type[3]}']] } ]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, { FnCall: [assert_instr, ['{type[4]}']]}] ] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable + safety: safe + types: + - ['vaddw_high_s8', 'int16x8_t', 'int8x16_t', 'vaddw', 'saddw2', 'int8x8_t', '[8, 9, 10, 11, 12, 13, 14, 15]'] + - ['vaddw_high_s16', 'int32x4_t', 'int16x8_t', 'vaddw', 'saddw2', 'int16x4_t', '[4, 5, 6, 7]'] + - ['vaddw_high_s32', 'int64x2_t', 'int32x4_t', 'vaddw', 'saddw2', 'int32x2_t', '[2, 3]'] + - ['vaddw_high_u8', 'uint16x8_t', 'uint8x16_t', 'vaddw', 'uaddw2', 'uint8x8_t', '[8, 9, 10, 11, 12, 13, 14, 15]'] + - ['vaddw_high_u16', 'uint32x4_t', 'uint16x8_t', 'vaddw', 'uaddw2', 'uint16x4_t', '[4, 5, 6, 7]'] + - ['vaddw_high_u32', 'uint64x2_t', 'uint32x4_t', 'vaddw', 'uaddw2', 'uint32x2_t', '[2, 3]'] + compose: + - Let: + - b + - '{neon_type[5]}' + - FnCall: ['simd_shuffle!', [b, b, '{type[6]}']] + - Let: + - b + - '{neon_type[1]}' + - FnCall: ['simd_cast', [b]] + - FnCall: [simd_add, [a, b]] + + - name: "{type[0]}" + doc: "Add returning High Narrow." 
+ arguments: ['a: {neon_type[1]}', 'b: {neon_type[1]}'] + return_type: "{neon_type[2]}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, { FnCall: [assert_instr, ['vaddhn']] } ]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, { FnCall: [assert_instr, ['addhn']]}] ] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable + safety: safe + types: + - ['vaddhn_s16', 'int16x8_t', 'int8x8_t', 'int16x8_t::splat(8)'] + - ['vaddhn_s32', 'int32x4_t', 'int16x4_t', 'int32x4_t::splat(16)'] + - ['vaddhn_s64', 'int64x2_t', 'int32x2_t', 'int64x2_t::splat(32)'] + - ['vaddhn_u16', 'uint16x8_t', 'uint8x8_t', 'uint16x8_t::splat(8)'] + - ['vaddhn_u32', 'uint32x4_t', 'uint16x4_t', 'uint32x4_t::splat(16)'] + - ['vaddhn_u64', 'uint64x2_t', 'uint32x2_t', 'uint64x2_t::splat(32)'] + compose: + - FnCall: + - simd_cast + - - FnCall: + - simd_shr + - - FnCall: + - simd_add + - - a + - b + - '{type[3]}' + + - name: "{type[0]}" + doc: "Add returning High Narrow (high half)." + arguments: ['r: {neon_type[1]}', 'a: {neon_type[2]}', 'b: {neon_type[2]}'] + return_type: "{neon_type[3]}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, { FnCall: [assert_instr, ['vaddhn']] } ]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, { FnCall: [assert_instr, ['addhn2']]}] ] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable + safety: safe + types: + - ['vaddhn_high_s16', 'int8x8_t', 'int16x8_t', 'int8x16_t', 'int16x8_t::splat(8)', '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]'] + - ['vaddhn_high_s32', 'int16x4_t', 'int32x4_t', 'int16x8_t', 'int32x4_t::splat(16)', '[0, 1, 2, 3, 4, 5, 6, 7]'] + - ['vaddhn_high_s64', 'int32x2_t', 'int64x2_t', 'int32x4_t', 'int64x2_t::splat(32)', '[0, 1, 2, 3]'] + - ['vaddhn_high_u16', 'uint8x8_t', 'uint16x8_t', 'uint8x16_t', 'uint16x8_t::splat(8)', '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]'] + - ['vaddhn_high_u32', 'uint16x4_t', 'uint32x4_t', 'uint16x8_t', 'uint32x4_t::splat(16)', '[0, 1, 2, 3, 4, 5, 6, 7]'] + - 
['vaddhn_high_u64', 'uint32x2_t', 'uint64x2_t', 'uint32x4_t', 'uint64x2_t::splat(32)', '[0, 1, 2, 3]'] + compose: + - Let: + - x + - FnCall: + - simd_cast + - - FnCall: + - simd_shr + - - FnCall: + - simd_add + - - a + - b + - '{type[4]}' + - FnCall: ['simd_shuffle!', [r, x, '{type[5]}']] + + - name: "{type[0]}" + doc: "Vector narrow integer." + arguments: ['a: {neon_type[1]}'] + return_type: "{neon_type[2]}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, { FnCall: [assert_instr, ['vmovn']] } ]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, { FnCall: [assert_instr, ['xtn']]}] ] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable + safety: safe + types: + - ['vmovn_s16', 'int16x8_t', 'int8x8_t'] + - ['vmovn_s32', 'int32x4_t', 'int16x4_t'] + - ['vmovn_s64', 'int64x2_t', 'int32x2_t'] + - ['vmovn_u16', 'uint16x8_t', 'uint8x8_t'] + - ['vmovn_u32', 'uint32x4_t', 'uint16x4_t'] + - ['vmovn_u64', 'uint64x2_t', 'uint32x2_t'] + compose: + - FnCall: [simd_cast, [a]] + + - name: "{type[0]}" + doc: "Vector long move." + arguments: ['a: {neon_type[1]}'] + return_type: "{neon_type[2]}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, { FnCall: [assert_instr, ['vmovl']] } ]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, { FnCall: [assert_instr, ['{type[3]}']]}] ] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable + safety: safe + types: + - ['vmovl_s8', 'int8x8_t', 'int16x8_t', 'sxtl'] + - ['vmovl_s16', 'int16x4_t', 'int32x4_t', 'sxtl'] + - ['vmovl_s32', 'int32x2_t', 'int64x2_t', 'sxtl'] + - ['vmovl_u8', 'uint8x8_t', 'uint16x8_t', 'uxtl'] + - ['vmovl_u16', 'uint16x4_t', 'uint32x4_t', 'uxtl'] + - ['vmovl_u32', 'uint32x2_t', 'uint64x2_t', 'uxtl'] + compose: + - FnCall: [simd_cast, [a]] + + - name: "{type[0]}" + doc: "Vector bitwise not." 
+ arguments: ['a: {neon_type[1]}'] + return_type: "{neon_type[1]}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, { FnCall: [assert_instr, ['vmvn']] } ]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, { FnCall: [assert_instr, ['mvn']]}] ] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable + safety: safe + types: + - ['vmvn_s8', 'int8x8_t', 'int8x8_t::splat(-1)'] + - ['vmvnq_s8', 'int8x16_t', 'int8x16_t::splat(-1)'] + - ['vmvn_s16', 'int16x4_t', 'int16x4_t::splat(-1)'] + - ['vmvnq_s16', 'int16x8_t', 'int16x8_t::splat(-1)'] + - ['vmvn_s32', 'int32x2_t', 'int32x2_t::splat(-1)'] + - ['vmvnq_s32', 'int32x4_t', 'int32x4_t::splat(-1)'] + - ['vmvn_u8', 'uint8x8_t', 'uint8x8_t::splat(255)'] + - ['vmvnq_u8', 'uint8x16_t', 'uint8x16_t::splat(255)'] + - ['vmvn_u16', 'uint16x4_t', 'uint16x4_t::splat(65_535)'] + - ['vmvnq_u16', 'uint16x8_t', 'uint16x8_t::splat(65_535)'] + - ['vmvn_u32', 'uint32x2_t', 'uint32x2_t::splat(4_294_967_295)'] + - ['vmvnq_u32', 'uint32x4_t', 'uint32x4_t::splat(4_294_967_295)'] + - ['vmvn_p8', 'poly8x8_t', 'poly8x8_t::splat(255)'] + - ['vmvnq_p8', 'poly8x16_t', 'poly8x16_t::splat(255)'] + compose: + - Let: [b, '{type[2]}'] + - FnCall: [simd_xor, [a, b]] + + - name: "{type[0]}" + doc: "Vector bitwise bit clear." 
+ arguments: ['a: {neon_type[1]}', 'b: {neon_type[1]}'] + return_type: "{neon_type[1]}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, { FnCall: [assert_instr, ['vbic']] } ]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, { FnCall: [assert_instr, ['bic']]}] ] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable + safety: safe + types: + - ['vbic_s8', 'int8x8_t', 'int8x8_t::splat(-1)'] + - ['vbic_s16', 'int16x4_t', 'int16x4_t::splat(-1)'] + - ['vbic_s32', 'int32x2_t', 'int32x2_t::splat(-1)'] + - ['vbic_s64', 'int64x1_t', 'int64x1_t::splat(-1)'] + - ['vbicq_s8', 'int8x16_t', 'int8x16_t::splat(-1)'] + - ['vbicq_s16', 'int16x8_t', 'int16x8_t::splat(-1)'] + - ['vbicq_s32', 'int32x4_t', 'int32x4_t::splat(-1)'] + - ['vbicq_s64', 'int64x2_t', 'int64x2_t::splat(-1)'] + compose: + - Let: [c, '{type[2]}'] + - FnCall: + - simd_and + - - FnCall: [simd_xor, [b, c]] + - a + + - name: "{type[0]}" + doc: "Vector bitwise bit clear." + arguments: ['a: {neon_type[1]}', 'b: {neon_type[1]}'] + return_type: "{neon_type[1]}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, { FnCall: [assert_instr, ['vbic']] } ]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, { FnCall: [assert_instr, ['bic']]}] ] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable + safety: safe + types: + - ['vbic_u8', 'uint8x8_t', 'int8x8_t::splat(-1)'] + - ['vbic_u16', 'uint16x4_t', 'int16x4_t::splat(-1)'] + - ['vbic_u32', 'uint32x2_t', 'int32x2_t::splat(-1)'] + - ['vbic_u64', 'uint64x1_t', 'int64x1_t::splat(-1)'] + - ['vbicq_u8', 'uint8x16_t', 'int8x16_t::splat(-1)'] + - ['vbicq_u16', 'uint16x8_t', 'int16x8_t::splat(-1)'] + - ['vbicq_u32', 'uint32x4_t', 'int32x4_t::splat(-1)'] + - ['vbicq_u64', 'uint64x2_t', 'int64x2_t::splat(-1)'] + compose: + - Let: [c, '{type[2]}'] + - FnCall: + - simd_and + - - FnCall: + - simd_xor + - - b + - FnCall: [transmute, [c]] + - a + + - name: "{type[0]}" + doc: "Bitwise Select." 
+ arguments: ["a: {neon_type[1]}", "b: {neon_type[2]}", "c: {neon_type[2]}"] + return_type: "{neon_type[2]}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, { FnCall: [assert_instr, ['vbsl']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, ['bsl']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable + safety: safe + types: + - ['vbsl_s8', 'uint8x8_t', 'int8x8_t', 'int8x8_t::splat(-1)'] + - ['vbsl_s16', 'uint16x4_t', 'int16x4_t', 'int16x4_t::splat(-1)'] + - ['vbsl_s32', 'uint32x2_t', 'int32x2_t', 'int32x2_t::splat(-1)'] + - ['vbsl_s64', 'uint64x1_t', 'int64x1_t', 'int64x1_t::splat(-1)'] + - ['vbsl_f32', 'uint32x2_t', 'float32x2_t', 'int32x2_t::splat(-1)'] + - ['vbslq_f32', 'uint32x4_t', 'float32x4_t', 'int32x4_t::splat(-1)'] + - ['vbsl_p8', 'uint8x8_t', 'poly8x8_t', 'int8x8_t::splat(-1)'] + - ['vbsl_p16', 'uint16x4_t', 'poly16x4_t', 'int16x4_t::splat(-1)'] + - ['vbslq_s8', 'uint8x16_t', 'int8x16_t', 'int8x16_t::splat(-1)'] + - ['vbslq_s16', 'uint16x8_t', 'int16x8_t', 'int16x8_t::splat(-1)'] + - ['vbslq_s32', 'uint32x4_t', 'int32x4_t', 'int32x4_t::splat(-1)'] + - ['vbslq_s64', 'uint64x2_t', 'int64x2_t', 'int64x2_t::splat(-1)'] + - ['vbslq_p8', 'uint8x16_t', 'poly8x16_t', 'int8x16_t::splat(-1)'] + - ['vbslq_p16', 'uint16x8_t', 'poly16x8_t', 'int16x8_t::splat(-1)'] + compose: + - Let: [not, '{type[3]}'] + - FnCall: + - transmute + - - FnCall: + - simd_or + - - FnCall: + - simd_and + - - a + - FnCall: [transmute, [b]] + - FnCall: + - simd_and + - - FnCall: + - simd_xor + - - a + - FnCall: [transmute, [not]] + - FnCall: [transmute, [c]] + + - name: "{type[0]}" + doc: "Bitwise Select." 
+ arguments: ["a: {neon_type[1]}", "b: {neon_type[2]}", "c: {neon_type[2]}"] + return_type: "{neon_type[2]}" + attr: + - *neon-fp16 + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, { FnCall: [assert_instr, ['vbsl']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, ['bsl']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable + safety: safe + types: + - ['vbslq_f16', 'uint16x8_t', 'float16x8_t', 'int16x8_t::splat(-1)'] + - ['vbsl_f16', 'uint16x4_t', 'float16x4_t', 'int16x4_t::splat(-1)'] + compose: + - Let: [not, '{type[3]}'] + - FnCall: + - transmute + - - FnCall: + - simd_or + - - FnCall: + - simd_and + - - a + - FnCall: [transmute, [b]] + - FnCall: + - simd_and + - - FnCall: + - simd_xor + - - a + - FnCall: [transmute, [not]] + - FnCall: [transmute, [c]] + + - name: "{type[0]}" + doc: "Bitwise Select." + arguments: ["a: {neon_type[1]}", "b: {neon_type[1]}", "c: {neon_type[1]}"] + return_type: "{neon_type[1]}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, { FnCall: [assert_instr, ['vbsl']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, ['bsl']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable + safety: safe + types: + - ['vbslq_u8', 'uint8x16_t', 'int8x16_t::splat(-1)'] + - ['vbslq_u16', 'uint16x8_t', 'int16x8_t::splat(-1)'] + - ['vbslq_u32', 'uint32x4_t', 'int32x4_t::splat(-1)'] + - ['vbslq_u64', 'uint64x2_t', 'int64x2_t::splat(-1)'] + - ['vbsl_u8', 'uint8x8_t', 'int8x8_t::splat(-1)'] + - ['vbsl_u16', 'uint16x4_t', 'int16x4_t::splat(-1)'] + - ['vbsl_u32', 'uint32x2_t', 'int32x2_t::splat(-1)'] + - ['vbsl_u64', 'uint64x1_t', 'int64x1_t::splat(-1)'] + compose: + - Let: [not, '{type[2]}'] + - FnCall: + - transmute + - - FnCall: + - simd_or + - - FnCall: [simd_and, [a, b]] + - FnCall: + - simd_and + - - FnCall: + - simd_xor + - - a + - FnCall: [transmute, [not]] + - c + + - name: "{type[0]}" + doc: "Vector bitwise inclusive OR NOT" + arguments: ["a: {neon_type[1]}", "b: 
{neon_type[1]}"] + return_type: "{neon_type[1]}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, { FnCall: [assert_instr, ['vorn']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, ['orn']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable + safety: safe + types: + - ['vorn_s8', 'int8x8_t', 'int8x8_t::splat(-1)'] + - ['vornq_s8', 'int8x16_t', 'int8x16_t::splat(-1)'] + - ['vorn_s16', 'int16x4_t', 'int16x4_t::splat(-1)'] + - ['vornq_s16', 'int16x8_t', 'int16x8_t::splat(-1)'] + - ['vorn_s32', 'int32x2_t', 'int32x2_t::splat(-1)'] + - ['vornq_s32', 'int32x4_t', 'int32x4_t::splat(-1)'] + - ['vorn_s64', 'int64x1_t', 'int64x1_t::splat(-1)'] + - ['vornq_s64', 'int64x2_t', 'int64x2_t::splat(-1)'] + compose: + - Let: [c, '{type[2]}'] + - FnCall: + - simd_or + - - FnCall: [simd_xor, [b, c]] + - a + + - name: "{type[0]}" + doc: "Vector bitwise inclusive OR NOT" + arguments: ["a: {neon_type[1]}", "b: {neon_type[1]}"] + return_type: "{neon_type[1]}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, { FnCall: [assert_instr, ['vorn']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, ['orn']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable + safety: safe + types: + - ['vorn_u8', 'uint8x8_t', 'int8x8_t::splat(-1)'] + - ['vornq_u8', 'uint8x16_t', 'int8x16_t::splat(-1)'] + - ['vorn_u16', 'uint16x4_t', 'int16x4_t::splat(-1)'] + - ['vornq_u16', 'uint16x8_t', 'int16x8_t::splat(-1)'] + - ['vorn_u32', 'uint32x2_t', 'int32x2_t::splat(-1)'] + - ['vornq_u32', 'uint32x4_t', 'int32x4_t::splat(-1)'] + - ['vorn_u64', 'uint64x1_t', 'int64x1_t::splat(-1)'] + - ['vornq_u64', 'uint64x2_t', 'int64x2_t::splat(-1)'] + compose: + - Let: [c, '{type[2]}'] + - FnCall: + - simd_or + - - FnCall: + - simd_xor + - - b + - FnCall: [transmute, [c]] + - a + + - name: "{type[0]}" + doc: "Move vector element to general-purpose register" + arguments: ["v: {neon_type[1]}"] + return_type: "{type[2]}" + safety: safe + 
static_defs: ['const IMM5: i32'] + attr: + - *neon-v7 + - FnCall: [rustc_legacy_const_generics, ['1']] + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [nop, 'IMM5 = {type[3]}']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable + types: + - ['vget_lane_s8', 'int8x8_t', 'i8', '2', 'IMM5, 3', 'IMM5 as u32'] + - ['vget_lane_u8', 'uint8x8_t', 'u8', '2', 'IMM5, 3', 'IMM5 as u32'] + - ['vget_lane_p8', 'poly8x8_t', 'p8', '2', 'IMM5, 3', 'IMM5 as u32'] + - ['vgetq_lane_s8', 'int8x16_t', 'i8', '2', 'IMM5, 4', 'IMM5 as u32'] + - ['vgetq_lane_u8', 'uint8x16_t', 'u8', '2', 'IMM5, 4', 'IMM5 as u32'] + - ['vgetq_lane_p8', 'poly8x16_t', 'p8', '2', 'IMM5, 4', 'IMM5 as u32'] + - ['vget_lane_u16', 'uint16x4_t', 'u16', '2', 'IMM5, 2', 'IMM5 as u32'] + - ['vget_lane_s16', 'int16x4_t', 'i16', '2', 'IMM5, 2', 'IMM5 as u32'] + - ['vget_lane_p16', 'poly16x4_t', 'p16', '2', 'IMM5, 2', 'IMM5 as u32'] + - ['vgetq_lane_u16', 'uint16x8_t', 'u16', '2', 'IMM5, 3', 'IMM5 as u32'] + - ['vgetq_lane_s16', 'int16x8_t', 'i16', '2', 'IMM5, 3', 'IMM5 as u32'] + - ['vgetq_lane_p16', 'poly16x8_t', 'p16', '2', 'IMM5, 3', 'IMM5 as u32'] + - ['vget_lane_u32', 'uint32x2_t', 'u32', '1', 'IMM5, 1', 'IMM5 as u32'] + - ['vget_lane_s32', 'int32x2_t', 'i32', '1', 'IMM5, 1', 'IMM5 as u32'] + - ['vgetq_lane_u32', 'uint32x4_t', 'u32', '2', 'IMM5, 2', 'IMM5 as u32'] + - ['vgetq_lane_s32', 'int32x4_t', 'i32', '2', 'IMM5, 2', 'IMM5 as u32'] + - ['vget_lane_f32', 'float32x2_t', 'f32', '1', 'IMM5, 1', 'IMM5 as u32'] + - ['vgetq_lane_f32', 'float32x4_t', 'f32', '1', 'IMM5, 2', 'IMM5 as u32'] + - ['vgetq_lane_p64', 'poly64x2_t', 'p64', '1', 'IMM5, 1', 'IMM5 as u32'] + - ['vgetq_lane_s64', 'int64x2_t', 'i64', '1', 'IMM5, 1', 'IMM5 as u32'] + - ['vgetq_lane_u64', 'uint64x2_t', 'u64', '1', 'IMM5, 2', 'IMM5 as u32'] + compose: + - FnCall: ['static_assert_uimm_bits!', ['{type[4]}']] + - FnCall: ['simd_extract!', [v, '{type[5]}']] + + - name: "{type[0]}" + doc: "Move vector element to general-purpose register" + 
arguments: ["v: {neon_type[1]}"] + return_type: "{type[2]}" + safety: safe + static_defs: ['const IMM5: i32'] + attr: + - *neon-v7 + - FnCall: [rustc_legacy_const_generics, ['1']] + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [nop, 'IMM5 = 0']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable + types: + - ['vget_lane_u64', 'uint64x1_t', 'u64', '0'] + - ['vget_lane_p64', 'poly64x1_t', 'p64', 'IMM5 as u32'] + - ['vget_lane_s64', 'int64x1_t', 'i64', 'IMM5 as u32'] + compose: + - FnCall: ['static_assert!', ['IMM5 == 0']] + - FnCall: ['simd_extract!', [v, '{type[3]}']] + + # Private vfp4 version used by FMA intriniscs because LLVM does + # not inline the non-vfp4 version in vfp4 functions. + - name: "{type[0]}" + visibility: private + doc: "Duplicate vector element to vector or scalar" + arguments: ["value: {type[1]}"] + return_type: "{neon_type[2]}" + attr: + - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "vfp4"']]}]] + - FnCall: [cfg_attr, [*test-is-arm, { FnCall: [assert_instr, ['"vdup.32"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, ['dup']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable + safety: safe + types: + - ['vdup_n_f32_vfp4', 'f32', 'float32x2_t', 'float32x2_t::splat(value)'] + - ['vdupq_n_f32_vfp4', 'f32', 'float32x4_t', 'float32x4_t::splat(value)'] + compose: + - Identifier: ['{type[3]}', Symbol] + + - name: "{type[0]}" + doc: "Duplicate vector element to vector or scalar" + arguments: ["a: {type[1]}"] + return_type: "{neon_type[2]}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, { FnCall: [assert_instr, ['"{type[3]}"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, ['{type[4]}']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable + safety: safe + types: + - ['vget_high_s64', 'int64x2_t', 'int64x1_t', 'vmov', 'ext', 'unsafe { int64x1_t([simd_extract!(a, 1)]) }'] + - ['vget_high_u64', 'uint64x2_t', 'uint64x1_t', 
'vmov', 'ext', 'unsafe { uint64x1_t([simd_extract!(a, 1)]) }'] + compose: + - Identifier: ['{type[5]}', Symbol] + + - name: "{type[0]}" + doc: "Duplicate vector element to vector or scalar" + arguments: ["a: {type[1]}"] + return_type: "{neon_type[2]}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [nop]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable + safety: safe + types: + - ['vget_low_s64', 'int64x2_t', 'int64x1_t', 'unsafe { int64x1_t([simd_extract!(a, 0)]) }'] + - ['vget_low_u64', 'uint64x2_t', 'uint64x1_t', 'unsafe { uint64x1_t([simd_extract!(a, 0)]) }'] + compose: + - Identifier: ['{type[3]}', Symbol] + + - name: "{type[0]}" + doc: "Duplicate vector element to vector or scalar" + arguments: ["a: {type[1]}"] + return_type: "{neon_type[2]}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, { FnCall: [assert_instr, ['"{type[3]}"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, ['{type[4]}']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable + safety: safe + types: + - ['vget_high_s8', 'int8x16_t', 'int8x8_t', 'vmov', 'ext', '[8, 9, 10, 11, 12, 13, 14, 15]'] + - ['vget_high_u8', 'uint8x16_t', 'uint8x8_t', 'vmov', 'ext', '[8, 9, 10, 11, 12, 13, 14, 15]'] + - ['vget_high_p8', 'poly8x16_t', 'poly8x8_t', 'vmov', 'ext', '[8, 9, 10, 11, 12, 13, 14, 15]'] + - ['vget_high_s16', 'int16x8_t', 'int16x4_t', 'vmov', 'ext', '[4, 5, 6, 7]'] + - ['vget_high_u16', 'uint16x8_t', 'uint16x4_t', 'vmov', 'ext', '[4, 5, 6, 7]'] + - ['vget_high_p16', 'poly16x8_t', 'poly16x4_t', 'vmov', 'ext', '[4, 5, 6, 7]'] + - ['vget_high_s32', 'int32x4_t', 'int32x2_t', 'vmov', 'ext', '[2, 3]'] + - ['vget_high_u32', 'uint32x4_t', 'uint32x2_t', 'vmov', 'ext', '[2, 3]'] + - ['vget_high_f32', 'float32x4_t', 'float32x2_t', 'vmov', 'ext', '[2, 3]'] + compose: + - FnCall: ['simd_shuffle!', [a, a, '{type[5]}']] + + - name: "{type[0]}" + doc: "Duplicate vector element to vector or scalar" + arguments: ["a: {type[1]}"] + 
return_type: "{neon_type[2]}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [nop]]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable + safety: safe + types: + - ['vget_low_s8', 'int8x16_t', 'int8x8_t', '[0, 1, 2, 3, 4, 5, 6, 7]'] + - ['vget_low_u8', 'uint8x16_t', 'uint8x8_t','[0, 1, 2, 3, 4, 5, 6, 7]'] + - ['vget_low_p8', 'poly8x16_t', 'poly8x8_t','[0, 1, 2, 3, 4, 5, 6, 7]'] + - ['vget_low_s16', 'int16x8_t', 'int16x4_t', '[0, 1, 2, 3]'] + - ['vget_low_u16', 'uint16x8_t', 'uint16x4_t', '[0, 1, 2, 3]'] + - ['vget_low_p16', 'poly16x8_t', 'poly16x4_t', '[0, 1, 2, 3]'] + - ['vget_low_s32', 'int32x4_t', 'int32x2_t', '[0, 1]'] + - ['vget_low_f32', 'float32x4_t', 'float32x2_t', '[0, 1]'] + - ['vget_low_u32', 'uint32x4_t', 'uint32x2_t', '[0, 1]'] + compose: + - FnCall: ['simd_shuffle!', [a, a, '{type[3]}']] + + - name: "{type[0]}" + doc: "Duplicate vector element to vector or scalar" + arguments: ["value: {type[1]}"] + return_type: "{neon_type[2]}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, { FnCall: [assert_instr, ['"{type[3]}"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, ['{type[4]}']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable + safety: safe + types: + - ['vdupq_n_s8', 'i8', 'int8x16_t', 'vdup.8', 'dup', 'int8x16_t::splat(value)'] + - ['vdupq_n_s16', 'i16', 'int16x8_t', 'vdup.16', 'dup', 'int16x8_t::splat(value)'] + - ['vdupq_n_s32', 'i32', 'int32x4_t', 'vdup.32', 'dup', 'int32x4_t::splat(value)'] + - ['vdupq_n_s64', 'i64', 'int64x2_t', 'vmov', 'dup', 'int64x2_t::splat(value)'] + - ['vdupq_n_u8', 'u8', 'uint8x16_t', 'vdup.8', 'dup', 'uint8x16_t::splat(value)'] + - ['vdupq_n_u16', 'u16', 'uint16x8_t', 'vdup.16', 'dup', 'uint16x8_t::splat(value)'] + - ['vdupq_n_u32', 'u32', 'uint32x4_t', 'vdup.32', 'dup', 'uint32x4_t::splat(value)'] + - ['vdupq_n_f32', 'f32', 'float32x4_t', 'vdup.32', 'dup', 'float32x4_t::splat(value)'] + - ['vdupq_n_u64', 'u64', 'uint64x2_t', 'vmov', 
'dup', 'uint64x2_t::splat(value)'] + - ['vdupq_n_p8', 'u8', 'poly8x16_t', 'vdup.8', 'dup', 'poly8x16_t::splat(value)'] + - ['vdupq_n_p16', 'p16', 'poly16x8_t', 'vdup.16', 'dup', 'poly16x8_t::splat(value)'] + - ['vdup_n_s8', 'i8', 'int8x8_t', 'vdup.8', 'dup', 'int8x8_t::splat(value)'] + - ['vdup_n_s16', 'i16', 'int16x4_t', 'vdup.16', 'dup', 'int16x4_t::splat(value)'] + - ['vdup_n_s32', 'i32', 'int32x2_t', 'vdup.32', 'dup', 'int32x2_t::splat(value)'] + - ['vdup_n_s64', 'i64', 'int64x1_t', 'vmov', 'fmov', 'int64x1_t::splat(value)'] + - ['vdup_n_u8', 'u8', 'uint8x8_t', 'vdup.8', 'dup', 'uint8x8_t::splat(value)'] + - ['vdup_n_u16', 'u16', 'uint16x4_t', 'vdup.16', 'dup', 'uint16x4_t::splat(value)'] + - ['vdup_n_u32', 'u32', 'uint32x2_t', 'vdup.32', 'dup', 'uint32x2_t::splat(value)'] + - ['vdup_n_f32', 'f32', 'float32x2_t', 'vdup.32', 'dup', 'float32x2_t::splat(value)'] + - ['vdup_n_u64', 'u64', 'uint64x1_t', 'vmov', 'fmov', 'uint64x1_t::splat(value)'] + - ['vdup_n_p8', 'p8', 'poly8x8_t', 'vdup.8', 'dup', 'poly8x8_t::splat(value)'] + - ['vdup_n_p16', 'p16', 'poly16x4_t', 'vdup.16', 'dup', 'poly16x4_t::splat(value)'] + compose: + - Identifier: ['{type[5]}', Symbol] + + - name: "{type[0]}" + doc: "Duplicate vector element to vector or scalar" + arguments: ["value: {type[1]}"] + return_type: "{neon_type[2]}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, { FnCall: [assert_instr, ['"{type[3]}"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, ['{type[4]}']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable + safety: safe + types: + - ['vmov_n_s8', 'i8', 'int8x8_t', 'vdup.8', 'dup', 'vdup_n_s8'] + - ['vmov_n_s16', 'i16', 'int16x4_t', 'vdup.16', 'dup', 'vdup_n_s16'] + - ['vmov_n_s32', 'i32', 'int32x2_t', 'vdup.32', 'dup', 'vdup_n_s32'] + - ['vmov_n_s64', 'i64', 'int64x1_t', 'vmov', 'fmov', 'vdup_n_s64'] + - ['vmov_n_u8', 'u8', 'uint8x8_t', 'vdup.8', 'dup', 'vdup_n_u8'] + - ['vmov_n_u16', 'u16', 'uint16x4_t', 'vdup.16', 'dup', 
'vdup_n_u16'] + - ['vmov_n_u32', 'u32', 'uint32x2_t', 'vdup.32', 'dup', 'vdup_n_u32'] + - ['vmov_n_u64', 'u64', 'uint64x1_t', 'vmov', 'fmov', 'vdup_n_u64'] + - ['vmov_n_p8', 'p8', 'poly8x8_t', 'vdup.8', 'dup', 'vdup_n_p8'] + - ['vmov_n_p16', 'p16', 'poly16x4_t', 'vdup.16', 'dup', 'vdup_n_p16'] + - ['vmov_n_f32', 'f32', 'float32x2_t', 'vdup.32', 'dup', 'vdup_n_f32'] + - ['vmovq_n_s8', 'i8', 'int8x16_t', 'vdup.8', 'dup', 'vdupq_n_s8'] + - ['vmovq_n_s16', 'i16', 'int16x8_t', 'vdup.16', 'dup', 'vdupq_n_s16'] + - ['vmovq_n_s32', 'i32', 'int32x4_t', 'vdup.32', 'dup', 'vdupq_n_s32'] + - ['vmovq_n_s64', 'i64', 'int64x2_t', 'vmov', 'dup', 'vdupq_n_s64'] + - ['vmovq_n_u8', 'u8', 'uint8x16_t', 'vdup.8', 'dup', 'vdupq_n_u8'] + - ['vmovq_n_u16', 'u16', 'uint16x8_t', 'vdup.16', 'dup', 'vdupq_n_u16'] + - ['vmovq_n_u32', 'u32', 'uint32x4_t', 'vdup.32', 'dup', 'vdupq_n_u32'] + - ['vmovq_n_u64', 'u64', 'uint64x2_t', 'vmov', 'dup', 'vdupq_n_u64'] + - ['vmovq_n_p8', 'p8', 'poly8x16_t', 'vdup.8', 'dup', 'vdupq_n_p8'] + - ['vmovq_n_p16', 'p16', 'poly16x8_t', 'vdup.16', 'dup', 'vdupq_n_p16'] + - ['vmovq_n_f32', 'f32', 'float32x4_t', 'vdup.32', 'dup', 'vdupq_n_f32'] + compose: + - FnCall: ['{type[5]}', [value]] + + - name: "{type[0]}" + doc: "Store SIMD&FP register (immediate offset)" + arguments: ["a: {type[1]}"] + return_type: "{type[2]}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['nop']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, ['nop']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable + safety: + unsafe: [neon] + types: + - ['vldrq_p128', '* const p128', 'p128'] + compose: + - Identifier: ['*a', Symbol] + + - name: "{type[0]}" + doc: "Store SIMD&FP register (immediate offset)" + arguments: ["a: {type[1]}", "b: {type[2]}"] + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['nop']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, 
['nop']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable + safety: + unsafe: [neon] + types: + - ['vstrq_p128', '* mut p128', 'p128'] + compose: + - Identifier: ['*a = b', Symbol] + + - name: "{type[0]}" + doc: "Extract vector from pair of vectors" + arguments: ["a: {neon_type[1]}", "_b: {neon_type[1]}"] + return_type: "{neon_type[1]}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['nop', 'N = 0']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, ['nop', 'N = 0']]}]] + - FnCall: [rustc_legacy_const_generics, ['2']] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable + static_defs: ['const N: i32'] + safety: + unsafe: [neon] + types: + - ['vext_s64', 'int64x1_t'] + - ['vext_u64', 'uint64x1_t'] + compose: + - FnCall: ['static_assert!', ['N == 0']] + - Identifier: ['a', Symbol] + + - name: "{type[0]}" + doc: "Reversing vector elements (swap endianness)" + arguments: ["a: {neon_type[1]}"] + return_type: "{neon_type[1]}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"{type[2]}"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, ['{type[3]}']]}]] + - *neon-not-arm-stable + - *neon-cfg-arm-unstable + safety: safe + types: + - ['vrev16_s8', 'int8x8_t', 'vrev16.8', 'rev16', '[1, 0, 3, 2, 5, 4, 7, 6]'] + - ['vrev16q_s8', 'int8x16_t', 'vrev16.8', 'rev16', '[1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14]'] + - ['vrev16_u8', 'uint8x8_t', 'vrev16.8', 'rev16', '[1, 0, 3, 2, 5, 4, 7, 6]'] + - ['vrev16q_u8', 'uint8x16_t', 'vrev16.8', 'rev16', '[1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14]'] + - ['vrev16_p8', 'poly8x8_t', 'vrev16.8', 'rev16', '[1, 0, 3, 2, 5, 4, 7, 6]'] + - ['vrev16q_p8', 'poly8x16_t', 'vrev16.8', 'rev16', '[1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14]'] + - ['vrev32_s8', 'int8x8_t', 'vrev32.8', 'rev32', '[3, 2, 1, 0, 7, 6, 5, 4]'] + - ['vrev32q_s8', 'int8x16_t', 'vrev32.8', 'rev32', '[3, 2, 1, 
0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12]'] + - ['vrev32_u8', 'uint8x8_t', 'vrev32.8', 'rev32', '[3, 2, 1, 0, 7, 6, 5, 4]'] + - ['vrev32q_u8', 'uint8x16_t', 'vrev32.8', 'rev32', '[3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12]'] + - ['vrev32_p8', 'poly8x8_t', 'vrev32.8', 'rev32', '[3, 2, 1, 0, 7, 6, 5, 4]'] + - ['vrev32q_p8', 'poly8x16_t', 'vrev32.8', 'rev32', '[3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12]'] + - ['vrev32_s16', 'int16x4_t', 'vrev32.16', 'rev32', '[1, 0, 3, 2]'] + - ['vrev32q_s16', 'int16x8_t', 'vrev32.16', 'rev32', '[1, 0, 3, 2, 5, 4, 7, 6]'] + - ['vrev32_u16', 'uint16x4_t', 'vrev32.16', 'rev32', '[1, 0, 3, 2]'] + - ['vrev32q_u16', 'uint16x8_t', 'vrev32.16', 'rev32', '[1, 0, 3, 2, 5, 4, 7, 6]'] + - ['vrev32_p16', 'poly16x4_t', 'vrev32.16', 'rev32', '[1, 0, 3, 2]'] + - ['vrev32q_p16', 'poly16x8_t', 'vrev32.16', 'rev32', '[1, 0, 3, 2, 5, 4, 7, 6]'] + - ['vrev64_s8', 'int8x8_t', 'vrev64.8', 'rev64', '[7, 6, 5, 4, 3, 2, 1, 0]'] + - ['vrev64q_s8', 'int8x16_t', 'vrev64.8', 'rev64', '[7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8]'] + - ['vrev64_u8', 'uint8x8_t', 'vrev64.8', 'rev64', '[7, 6, 5, 4, 3, 2, 1, 0]'] + - ['vrev64q_u8', 'uint8x16_t', 'vrev64.8', 'rev64', '[7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8]'] + - ['vrev64_p8', 'poly8x8_t', 'vrev64.8', 'rev64', '[7, 6, 5, 4, 3, 2, 1, 0]'] + - ['vrev64q_p8', 'poly8x16_t', 'vrev64.8', 'rev64', '[7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8]'] + - ['vrev64_s16', 'int16x4_t', 'vrev64.16', 'rev64', '[3, 2, 1, 0]'] + - ['vrev64q_s16', 'int16x8_t', 'vrev64.16', 'rev64', '[3, 2, 1, 0, 7, 6, 5, 4]'] + - ['vrev64_u16', 'uint16x4_t', 'vrev64.16', 'rev64', '[3, 2, 1, 0]'] + - ['vrev64q_u16', 'uint16x8_t', 'vrev64.16', 'rev64', '[3, 2, 1, 0, 7, 6, 5, 4]'] + - ['vrev64_p16', 'poly16x4_t', 'vrev64.16', 'rev64', '[3, 2, 1, 0]'] + - ['vrev64q_p16', 'poly16x8_t', 'vrev64.16', 'rev64', '[3, 2, 1, 0, 7, 6, 5, 4]'] + - ['vrev64_s32', 'int32x2_t', 'vrev64.32', 'rev64', '[1, 0]'] + - 
['vrev64q_s32', 'int32x4_t', 'vrev64.32', 'rev64', '[1, 0, 3, 2]'] + - ['vrev64_u32', 'uint32x2_t', 'vrev64.32', 'rev64', '[1, 0]'] + - ['vrev64q_u32', 'uint32x4_t', 'vrev64.32', 'rev64', '[1, 0, 3, 2]'] + - ['vrev64_f32', 'float32x2_t', 'vrev64.32', 'rev64', '[1, 0]'] + - ['vrev64q_f32', 'float32x4_t', 'vrev64.32', 'rev64', '[1, 0, 3, 2]'] + compose: + - FnCall: ['simd_shuffle!', [a, a, '{type[4]}']] From 5e59779edcc6fb9c6eee8143379f783217e6febc Mon Sep 17 00:00:00 2001 From: James Barford-Evans Date: Tue, 25 Mar 2025 08:43:09 +0000 Subject: [PATCH 2/2] pr feedback - remove the commented out `vcombine_f16` --- crates/core_arch/src/arm_shared/neon/mod.rs | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) diff --git a/crates/core_arch/src/arm_shared/neon/mod.rs b/crates/core_arch/src/arm_shared/neon/mod.rs index 7a3e727b8c..84a3701cb7 100644 --- a/crates/core_arch/src/arm_shared/neon/mod.rs +++ b/crates/core_arch/src/arm_shared/neon/mod.rs @@ -1190,20 +1190,6 @@ impl_sign_conversions_neon! 
{ (uint8x8x4_t, int8x8x4_t) } -/* FIXME: 16-bit float -/// Vector combine -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] pub fn vcombine_f16 ( low: float16x4_t, high: float16x4_t) -> float16x8_t { - unsafe { simd_shuffle!(low, high, [0, 1, 2, 3, 4, 5, 6, 7]) } -} -*/ - #[cfg(test)] mod tests { use super::*; @@ -5512,9 +5498,8 @@ mod tests { test_vcombine!(test_vcombine_s16 => vcombine_s16([3_i16, -4, 5, -6], [13_i16, -14, 15, -16])); test_vcombine!(test_vcombine_u16 => vcombine_u16([3_u16, 4, 5, 6], [13_u16, 14, 15, 16])); test_vcombine!(test_vcombine_p16 => vcombine_p16([3_u16, 4, 5, 6], [13_u16, 14, 15, 16])); - // FIXME: 16-bit floats - // test_vcombine!(test_vcombine_f16 => vcombine_f16([3_f16, 4., 5., 6.], - // [13_f16, 14., 15., 16.])); + test_vcombine!(test_vcombine_f16 => vcombine_f16([3_f16, 4., 5., 6.], + [13_f16, 14., 15., 16.])); test_vcombine!(test_vcombine_s32 => vcombine_s32([3_i32, -4], [13_i32, -14])); test_vcombine!(test_vcombine_u32 => vcombine_u32([3_u32, 4], [13_u32, 14]));