diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vcompress.c
new file mode 100644
index 0000000000000..6cff3dfbbb7dd
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vcompress.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN:   -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vcompress_vm_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x i1> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vcompress.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vcompress_vm_bf16mf4(vbfloat16mf4_t vs2, vbool64_t vs1,
+                                         size_t vl) {
+  return __riscv_vcompress_vm_bf16mf4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vcompress_vm_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x i1> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vcompress.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vcompress_vm_bf16mf2(vbfloat16mf2_t vs2, vbool32_t vs1,
+                                         size_t vl) {
+  return __riscv_vcompress_vm_bf16mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vcompress_vm_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x i1> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vcompress.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vcompress_vm_bf16m1(vbfloat16m1_t vs2, vbool16_t vs1,
+                                       size_t vl) {
+  return __riscv_vcompress_vm_bf16m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vcompress_vm_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x i1> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vcompress.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vcompress_vm_bf16m2(vbfloat16m2_t vs2, vbool8_t vs1,
+                                       size_t vl) {
+  return __riscv_vcompress_vm_bf16m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vcompress_vm_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x i1> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vcompress.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vcompress_vm_bf16m4(vbfloat16m4_t vs2, vbool4_t vs1,
+                                       size_t vl) {
+  return __riscv_vcompress_vm_bf16m4(vs2, vs1, vl);
+}
+
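// A hand-written sketch, not part of the autogenerated checks: what the
// non-overloaded bf16 vcompress intrinsic computes. Lanes of `src` whose
// bit in `keep` is set are packed, in order, into the low-numbered lanes
// of the result. The helper name `pack_active_bf16m1` is illustrative only.
static inline vbfloat16m1_t pack_active_bf16m1(vbfloat16m1_t src,
                                               vbool16_t keep, size_t vl) {
  return __riscv_vcompress_vm_bf16m1(src, keep, vl);
}
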
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vcompress_vm_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], <vscale x 32 x i1> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vcompress.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vcompress_vm_bf16m8(vbfloat16m8_t vs2, vbool2_t vs1,
+                                       size_t vl) {
+  return __riscv_vcompress_vm_bf16m8(vs2, vs1, vl);
+}
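// A hand-written sketch, not part of the patch: the usual vrgather idiom
// exercised by the next file. vrgather.vv computes result[i] = vs2[vs1[i]],
// so a descending index vector reverses a register group. `reverse_bf16m1`
// is an illustrative name; vid/vrsub are standard RVV integer intrinsics.
static inline vbfloat16m1_t reverse_bf16m1(vbfloat16m1_t src, size_t vl) {
  vuint16m1_t idx = __riscv_vid_v_u16m1(vl);                  // 0, 1, ..., vl-1
  vuint16m1_t ridx = __riscv_vrsub_vx_u16m1(idx, vl - 1, vl); // vl-1, ..., 0
  return __riscv_vrgather_vv_bf16m1(src, ridx, vl);
}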
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vrgather.c
new file mode 100644
index 0000000000000..cb0004fa2b64d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vrgather.c
@@ -0,0 +1,272 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN:   -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vv.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vv_bf16mf4(vbfloat16mf4_t vs2, vuint16mf4_t vs1,
+                                        size_t vl) {
+  return __riscv_vrgather_vv_bf16mf4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vx_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vx_bf16mf4(vbfloat16mf4_t vs2, size_t vs1,
+                                        size_t vl) {
+  return __riscv_vrgather_vx_bf16mf4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vv.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vv_bf16mf2(vbfloat16mf2_t vs2, vuint16mf2_t vs1,
+                                        size_t vl) {
+  return __riscv_vrgather_vv_bf16mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vx_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vx_bf16mf2(vbfloat16mf2_t vs2, size_t vs1,
+                                        size_t vl) {
+  return __riscv_vrgather_vx_bf16mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vv.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vv_bf16m1(vbfloat16m1_t vs2, vuint16m1_t vs1,
+                                      size_t vl) {
+  return __riscv_vrgather_vv_bf16m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vx_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vx_bf16m1(vbfloat16m1_t vs2, size_t vs1,
+                                      size_t vl) {
+  return __riscv_vrgather_vx_bf16m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vv.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vv_bf16m2(vbfloat16m2_t vs2, vuint16m2_t vs1,
+                                      size_t vl) {
+  return __riscv_vrgather_vv_bf16m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vx_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vx_bf16m2(vbfloat16m2_t vs2, size_t vs1,
+                                      size_t vl) {
+  return __riscv_vrgather_vx_bf16m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vv.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vv_bf16m4(vbfloat16m4_t vs2, vuint16m4_t vs1,
+                                      size_t vl) {
+  return __riscv_vrgather_vv_bf16m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vx_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vx_bf16m4(vbfloat16m4_t vs2, size_t vs1,
+                                      size_t vl) {
+  return __riscv_vrgather_vx_bf16m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vv.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vv_bf16m8(vbfloat16m8_t vs2, vuint16m8_t vs1,
+                                      size_t vl) {
+  return __riscv_vrgather_vv_bf16m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vx_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[VS2]], i64 [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vx_bf16m8(vbfloat16m8_t vs2, size_t vs1,
+                                      size_t vl) {
+  return __riscv_vrgather_vx_bf16m8(vs2, vs1, vl);
+}
+
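// The _m tests below exercise the masked form, which takes the mask as the
// first argument and leaves inactive lanes unspecified (the trailing i64 3
// policy operand in the checks is tail- and mask-agnostic). A hand-written
// sketch, with `gather_where` as an illustrative name:
static inline vbfloat16m1_t gather_where(vbool16_t vm, vbfloat16m1_t src,
                                         vuint16m1_t idx, size_t vl) {
  return __riscv_vrgather_vv_bf16m1_m(vm, src, idx, vl);
}
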
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vv_bf16mf4_m(vbool64_t vm, vbfloat16mf4_t vs2,
+                                          vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_bf16mf4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vx_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vx_bf16mf4_m(vbool64_t vm, vbfloat16mf4_t vs2,
+                                          size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_bf16mf4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vv_bf16mf2_m(vbool32_t vm, vbfloat16mf2_t vs2,
+                                          vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_bf16mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vx_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vx_bf16mf2_m(vbool32_t vm, vbfloat16mf2_t vs2,
+                                          size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_bf16mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vv_bf16m1_m(vbool16_t vm, vbfloat16m1_t vs2,
+                                        vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_bf16m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vx_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vx_bf16m1_m(vbool16_t vm, vbfloat16m1_t vs2,
+                                        size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_bf16m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vv_bf16m2_m(vbool8_t vm, vbfloat16m2_t vs2,
+                                        vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_bf16m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vx_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vx_bf16m2_m(vbool8_t vm, vbfloat16m2_t vs2,
+                                        size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_bf16m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vv_bf16m4_m(vbool4_t vm, vbfloat16m4_t vs2,
+                                        vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_bf16m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vx_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vx_bf16m4_m(vbool4_t vm, vbfloat16m4_t vs2,
+                                        size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_bf16m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vv_bf16m8_m(vbool2_t vm, vbfloat16m8_t vs2,
+                                        vuint16m8_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_bf16m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vx_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[VS2]], i64 [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vx_bf16m8_m(vbool2_t vm, vbfloat16m8_t vs2,
+                                        size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_bf16m8_m(vm, vs2, vs1, vl);
+}
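// The overloaded tests that follow use the type-generic spellings: the same
// call site resolves on its argument types instead of carrying a _vm_bf16m1
// style suffix. A hand-written sketch, assuming the same semantics as the
// non-overloaded form above; `pack_active_generic` is an illustrative name.
static inline vbfloat16m1_t pack_active_generic(vbfloat16m1_t src,
                                                vbool16_t keep, size_t vl) {
  return __riscv_vcompress(src, keep, vl); // dispatches to the bf16m1 form
}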
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vcompress.c
new file mode 100644
index 0000000000000..40de6fdccf95f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vcompress.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN:   -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vcompress_vm_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x i1> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vcompress.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vcompress_vm_bf16mf4(vbfloat16mf4_t vs2, vbool64_t vs1,
+                                         size_t vl) {
+  return __riscv_vcompress(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vcompress_vm_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x i1> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vcompress.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vcompress_vm_bf16mf2(vbfloat16mf2_t vs2, vbool32_t vs1,
+                                         size_t vl) {
+  return __riscv_vcompress(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vcompress_vm_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x i1> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vcompress.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vcompress_vm_bf16m1(vbfloat16m1_t vs2, vbool16_t vs1,
+                                       size_t vl) {
+  return __riscv_vcompress(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vcompress_vm_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x i1> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vcompress.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vcompress_vm_bf16m2(vbfloat16m2_t vs2, vbool8_t vs1,
+                                       size_t vl) {
+  return __riscv_vcompress(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vcompress_vm_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x i1> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vcompress.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vcompress_vm_bf16m4(vbfloat16m4_t vs2, vbool4_t vs1,
+                                       size_t vl) {
+  return __riscv_vcompress(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vcompress_vm_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], <vscale x 32 x i1> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vcompress.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vcompress_vm_bf16m8(vbfloat16m8_t vs2, vbool2_t vs1,
+                                       size_t vl) {
+  return __riscv_vcompress(vs2, vs1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vrgather.c
new file mode 100644
index 0000000000000..068d8498997b6
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vrgather.c
@@ -0,0 +1,272 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN:   -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16mf4(
+// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1bf16.i64( poison, [[VS2]], [[VS1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vrgather_vv_bf16mf4(vbfloat16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16mf4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1bf16.i64( poison, [[VS2]], i64 [[VS1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vrgather_vx_bf16mf4(vbfloat16mf4_t vs2, size_t vs1, + size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2bf16.i64( poison, [[VS2]], [[VS1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vrgather_vv_bf16mf2(vbfloat16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2bf16.i64( poison, [[VS2]], i64 [[VS1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vrgather_vx_bf16mf2(vbfloat16mf2_t vs2, size_t vs1, + size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m1( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4bf16.i64( poison, [[VS2]], [[VS1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vrgather_vv_bf16m1(vbfloat16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m1( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4bf16.i64( poison, [[VS2]], i64 [[VS1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vrgather_vx_bf16m1(vbfloat16m1_t vs2, size_t vs1, + size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m2( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vrgather_vv_bf16m2(vbfloat16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8bf16.i64( poison, [[VS2]], i64 [[VS1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t 
test_vrgather_vx_bf16m2(vbfloat16m2_t vs2, size_t vs1, + size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m4( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16bf16.i64( poison, [[VS2]], [[VS1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vrgather_vv_bf16m4(vbfloat16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16bf16.i64( poison, [[VS2]], i64 [[VS1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vrgather_vx_bf16m4(vbfloat16m4_t vs2, size_t vs1, + size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m8( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32bf16.i64( poison, [[VS2]], [[VS1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vrgather_vv_bf16m8(vbfloat16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m8( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32bf16.i64( poison, [[VS2]], i64 [[VS1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vrgather_vx_bf16m8(vbfloat16m8_t vs2, size_t vs1, + size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vrgather_vv_bf16mf4_m(vbool64_t vm, vbfloat16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16mf4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1bf16.i64( poison, [[VS2]], i64 [[VS1]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vrgather_vx_bf16mf4_m(vbool64_t vm, vbfloat16mf4_t vs2, + size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vrgather_vv_bf16mf2_m(vbool32_t vm, vbfloat16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { 
+ return __riscv_vrgather(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2bf16.i64( poison, [[VS2]], i64 [[VS1]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vrgather_vx_bf16mf2_m(vbool32_t vm, vbfloat16mf2_t vs2, + size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vrgather_vv_bf16m1_m(vbool16_t vm, vbfloat16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4bf16.i64( poison, [[VS2]], i64 [[VS1]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vrgather_vx_bf16m1_m(vbool16_t vm, vbfloat16m1_t vs2, + size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vrgather_vv_bf16m2_m(vbool8_t vm, vbfloat16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8bf16.i64( poison, [[VS2]], i64 [[VS1]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vrgather_vx_bf16m2_m(vbool8_t vm, vbfloat16m2_t vs2, + size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vrgather_vv_bf16m4_m(vbool4_t vm, vbfloat16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16bf16.i64( poison, [[VS2]], i64 [[VS1]], [[VM]], i64 [[VL]], 
i64 3)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vx_bf16m4_m(vbool4_t vm, vbfloat16m4_t vs2,
+                                        size_t vs1, size_t vl) {
+  return __riscv_vrgather(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m8_m(
+// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vv_bf16m8_m(vbool2_t vm, vbfloat16m8_t vs2,
+                                        vuint16m8_t vs1, size_t vl) {
+  return __riscv_vrgather(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m8_m(
+// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32bf16.i64( poison, [[VS2]], i64 [[VS1]], [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vx_bf16m8_m(vbool2_t vm, vbfloat16m8_t vs2,
+                                        size_t vs1, size_t vl) {
+  return __riscv_vrgather(vm, vs2, vs1, vl);
+}
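// The policy tests that follow add an explicit destination operand `vd`.
// With the tail-undisturbed (_tu) variants, result lanes past the number of
// packed elements keep their old values from `vd` rather than being left
// undefined. A hand-written sketch; `merge_pack` is an illustrative name.
static inline vbfloat16m1_t merge_pack(vbfloat16m1_t vd, vbfloat16m1_t src,
                                       vbool16_t keep, size_t vl) {
  return __riscv_vcompress_vm_bf16m1_tu(vd, src, keep, vl);
}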
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vcompress.c
new file mode 100644
index 0000000000000..90160c8fe19c3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vcompress.c
@@ -0,0 +1,68 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN:   -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vcompress_vm_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x i1> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vcompress.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vcompress_vm_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs2, vbool64_t vs1, size_t vl) {
+  return __riscv_vcompress_vm_bf16mf4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vcompress_vm_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x i1> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vcompress.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vcompress_vm_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs2, vbool32_t vs1, size_t vl) {
+  return __riscv_vcompress_vm_bf16mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vcompress_vm_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x i1> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vcompress.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vcompress_vm_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs2, vbool16_t vs1, size_t vl) {
+  return __riscv_vcompress_vm_bf16m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vcompress_vm_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x i1> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vcompress.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vcompress_vm_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs2, vbool8_t vs1, size_t vl) {
+  return __riscv_vcompress_vm_bf16m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vcompress_vm_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x i1> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vcompress.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vcompress_vm_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs2, vbool4_t vs1, size_t vl) {
+  return __riscv_vcompress_vm_bf16m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vcompress_vm_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], <vscale x 32 x i1> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vcompress.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vcompress_vm_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs2, vbool2_t vs1, size_t vl) {
+  return __riscv_vcompress_vm_bf16m8_tu(vd, vs2, vs1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vrgather.c
new file mode 100644
index 0000000000000..137ab17c190b9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vrgather.c
@@ -0,0 +1,488 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN:   -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_bf16mf4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16mf4_tu(
+// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1bf16.i64( [[VD]], [[VS2]], i64 [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vx_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_bf16mf4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: [[VD:%.*]],
[[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vrgather_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgather_vv_bf16mf2_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2bf16.i64( [[VD]], [[VS2]], i64 [[VS1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vrgather_vx_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_bf16mf2_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vrgather_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_bf16m1_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4bf16.i64( [[VD]], [[VS2]], i64 [[VS1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vrgather_vx_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_bf16m1_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vrgather_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_bf16m2_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8bf16.i64( [[VD]], [[VS2]], i64 [[VS1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vrgather_vx_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_bf16m2_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vrgather_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return 
__riscv_vrgather_vv_bf16m4_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16bf16.i64( [[VD]], [[VS2]], i64 [[VS1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vrgather_vx_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_bf16m4_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vrgather_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_bf16m8_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32bf16.i64( [[VD]], [[VS2]], i64 [[VS1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vrgather_vx_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_bf16m8_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vrgather_vv_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, vbfloat16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgather_vv_bf16mf4_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16mf4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1bf16.i64( [[VD]], [[VS2]], i64 [[VS1]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vrgather_vx_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, vbfloat16mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_bf16mf4_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vrgather_vv_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, vbfloat16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgather_vv_bf16mf2_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2bf16.i64( [[VD]], [[VS2]], i64 [[VS1]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vrgather_vx_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, vbfloat16mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_bf16mf2_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vrgather_vv_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, vbfloat16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_bf16m1_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4bf16.i64( [[VD]], [[VS2]], i64 [[VS1]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vrgather_vx_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, vbfloat16m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_bf16m1_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vrgather_vv_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, vbfloat16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_bf16m2_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8bf16.i64( [[VD]], [[VS2]], i64 [[VS1]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vrgather_vx_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, vbfloat16m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_bf16m2_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vrgather_vv_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, vbfloat16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_bf16m4_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vrgather.vx.mask.nxv16bf16.i64( [[VD]], [[VS2]], i64 [[VS1]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vrgather_vx_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, vbfloat16m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_bf16m4_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vrgather_vv_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, vbfloat16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_bf16m8_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32bf16.i64( [[VD]], [[VS2]], i64 [[VS1]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vrgather_vx_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, vbfloat16m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_bf16m8_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vrgather_vv_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, vbfloat16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgather_vv_bf16mf4_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16mf4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1bf16.i64( [[VD]], [[VS2]], i64 [[VS1]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vrgather_vx_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, vbfloat16mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_bf16mf4_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vrgather_vv_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, vbfloat16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgather_vv_bf16mf2_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2bf16.i64( [[VD]], 
[[VS2]], i64 [[VS1]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vrgather_vx_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, vbfloat16mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_bf16mf2_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vrgather_vv_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, vbfloat16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_bf16m1_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4bf16.i64( [[VD]], [[VS2]], i64 [[VS1]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vrgather_vx_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, vbfloat16m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_bf16m1_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vrgather_vv_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, vbfloat16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_bf16m2_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8bf16.i64( [[VD]], [[VS2]], i64 [[VS1]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vrgather_vx_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, vbfloat16m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_bf16m2_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vrgather_vv_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, vbfloat16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_bf16m4_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16bf16.i64( [[VD]], [[VS2]], i64 [[VS1]], [[VM]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vrgather_vx_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, vbfloat16m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_bf16m4_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vrgather_vv_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, vbfloat16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_bf16m8_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32bf16.i64( [[VD]], [[VS2]], i64 [[VS1]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vrgather_vx_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, vbfloat16m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_bf16m8_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vrgather_vv_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, vbfloat16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgather_vv_bf16mf4_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16mf4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1bf16.i64( [[VD]], [[VS2]], i64 [[VS1]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vrgather_vx_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, vbfloat16mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_bf16mf4_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vrgather_vv_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, vbfloat16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgather_vv_bf16mf2_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2bf16.i64( [[VD]], [[VS2]], i64 [[VS1]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t 
+vbfloat16mf2_t test_vrgather_vx_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, vbfloat16mf2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_bf16mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vv_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, vbfloat16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_bf16m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vx_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vx_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, vbfloat16m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_bf16m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vv_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, vbfloat16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_bf16m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vx_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vx_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, vbfloat16m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_bf16m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vv_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, vbfloat16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_bf16m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vx_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vx_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, vbfloat16m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_bf16m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vv_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, vbfloat16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_bf16m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vx_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vx_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, vbfloat16m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_bf16m8_mu(vm, vd, vs2, vs1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vcompress.c
new file mode 100644
index 0000000000000..079977a0a06b0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vcompress.c
@@ -0,0 +1,76 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN:   -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vcompress_vm_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x i1> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vcompress.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vcompress_vm_bf16mf4_tu(vbfloat16mf4_t vd,
+                                            vbfloat16mf4_t vs2, vbool64_t vs1,
+                                            size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vcompress_vm_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x i1> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vcompress.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vcompress_vm_bf16mf2_tu(vbfloat16mf2_t vd,
+                                            vbfloat16mf2_t vs2, vbool32_t vs1,
+                                            size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vcompress_vm_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x i1> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vcompress.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vcompress_vm_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs2,
+                                          vbool16_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vcompress_vm_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x i1> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vcompress.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vcompress_vm_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs2,
+                                          vbool8_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vcompress_vm_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x i1> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vcompress.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vcompress_vm_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs2,
+                                          vbool4_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vcompress_vm_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], <vscale x 32 x i1> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vcompress.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vcompress_vm_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs2,
+                                          vbool2_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vrgather.c
new file mode 100644
index 0000000000000..7a5624aed608b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vrgather.c
@@ -0,0 +1,576 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN:   -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vv.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vv_bf16mf4_tu(vbfloat16mf4_t vd,
+                                           vbfloat16mf4_t vs2, vuint16mf4_t vs1,
+                                           size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vx_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vx_bf16mf4_tu(vbfloat16mf4_t vd,
+                                           vbfloat16mf4_t vs2, size_t vs1,
+                                           size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vv.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vv_bf16mf2_tu(vbfloat16mf2_t vd,
+                                           vbfloat16mf2_t vs2, vuint16mf2_t vs1,
+                                           size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vx_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vx_bf16mf2_tu(vbfloat16mf2_t vd,
+                                           vbfloat16mf2_t vs2, size_t vs1,
+                                           size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vv.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs2,
+                                         vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vx_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vx_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs2,
+                                         size_t vs1, size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vv.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs2,
+                                         vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vx_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vx_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs2,
+                                         size_t vs1, size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vv.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs2,
+                                         vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vx_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vx_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs2,
+                                         size_t vs1, size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vv.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs2,
+                                         vuint16m8_t vs1, size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vx_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vx_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs2,
+                                         size_t vs1, size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vv_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd,
+                                            vbfloat16mf4_t vs2,
+                                            vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vx_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vx_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd,
+                                            vbfloat16mf4_t vs2, size_t vs1,
+                                            size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vv_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd,
+                                            vbfloat16mf2_t vs2,
+                                            vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vx_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vx_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd,
+                                            vbfloat16mf2_t vs2, size_t vs1,
+                                            size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vv_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+                                          vbfloat16m1_t vs2, vuint16m1_t vs1,
+                                          size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vx_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vx_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+                                          vbfloat16m1_t vs2, size_t vs1,
+                                          size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vv_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+                                          vbfloat16m2_t vs2, vuint16m2_t vs1,
+                                          size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vx_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vx_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+                                          vbfloat16m2_t vs2, size_t vs1,
+                                          size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vv_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd,
+                                          vbfloat16m4_t vs2, vuint16m4_t vs1,
+                                          size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vx_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vx_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd,
+                                          vbfloat16m4_t vs2, size_t vs1,
+                                          size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vv_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd,
+                                          vbfloat16m8_t vs2, vuint16m8_t vs1,
+                                          size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vx_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vx_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd,
+                                          vbfloat16m8_t vs2, size_t vs1,
+                                          size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vv_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd,
+                                             vbfloat16mf4_t vs2,
+                                             vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vx_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vx_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd,
+                                             vbfloat16mf4_t vs2, size_t vs1,
+                                             size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vv_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd,
+                                             vbfloat16mf2_t vs2,
+                                             vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vx_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vx_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd,
+                                             vbfloat16mf2_t vs2, size_t vs1,
+                                             size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vv_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+                                           vbfloat16m1_t vs2, vuint16m1_t vs1,
+                                           size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vx_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vx_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+                                           vbfloat16m1_t vs2, size_t vs1,
+                                           size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vv_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+                                           vbfloat16m2_t vs2, vuint16m2_t vs1,
+                                           size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vx_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vx_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+                                           vbfloat16m2_t vs2, size_t vs1,
+                                           size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vv_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd,
+                                           vbfloat16m4_t vs2, vuint16m4_t vs1,
+                                           size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vx_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vx_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd,
+                                           vbfloat16m4_t vs2, size_t vs1,
+                                           size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vv_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd,
+                                           vbfloat16m8_t vs2, vuint16m8_t vs1,
+                                           size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vx_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vx_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd,
+                                           vbfloat16m8_t vs2, size_t vs1,
+                                           size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vv_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+                                           vbfloat16mf4_t vs2, vuint16mf4_t vs1,
+                                           size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vx_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vx_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+                                           vbfloat16mf4_t vs2, size_t vs1,
+                                           size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vv_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+                                           vbfloat16mf2_t vs2, vuint16mf2_t vs1,
+                                           size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vx_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vx_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+                                           vbfloat16mf2_t vs2, size_t vs1,
+                                           size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vv_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+                                         vbfloat16m1_t vs2, vuint16m1_t vs1,
+                                         size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vx_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vx_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+                                         vbfloat16m1_t vs2, size_t vs1,
+                                         size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vv_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+                                         vbfloat16m2_t vs2, vuint16m2_t vs1,
+                                         size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vx_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vx_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+                                         vbfloat16m2_t vs2, size_t vs1,
+                                         size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vv_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+                                         vbfloat16m4_t vs2, vuint16m4_t vs1,
+                                         size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vx_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vx_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+                                         vbfloat16m4_t vs2, size_t vs1,
+                                         size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vv_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd,
+                                         vbfloat16m8_t vs2, vuint16m8_t vs1,
+                                         size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vx_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vx_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd,
+                                         vbfloat16m8_t vs2, size_t vs1,
+                                         size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
+}