diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index e95e21bda687e..dde1882f5eea8 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -9732,9 +9732,9 @@ SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
         ElementCount::getScalable(RemIdx) + SubVecVT.getVectorElementCount();
     VL = computeVLMax(SubVecVT, DL, DAG);
 
-    // Use tail agnostic policy if we're inserting over Vec's tail.
+    // Use tail agnostic policy if we're inserting over InterSubVT's tail.
     unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED;
-    if (EndIndex == VecVT.getVectorElementCount())
+    if (EndIndex == InterSubVT.getVectorElementCount())
       Policy = RISCVII::TAIL_AGNOSTIC;
 
     // If we're inserting into the lowest elements, use a tail undisturbed
diff --git a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
index a2d02b6bb641b..76aa2b913c652 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
@@ -474,7 +474,7 @@ define <vscale x 6 x half> @extract_nxv6f16_nxv12f16_6(<vscale x 12 x half> %in)
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v12, v9, a0
 ; CHECK-NEXT:    add a1, a0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vx v12, v10, a0
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll
index d377082761736..b15896580d425 100644
--- a/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll
@@ -227,7 +227,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv1i32_1(<vscale x 16 x i32> %vec,
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 3
 ; CHECK-NEXT:    add a1, a0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v16, a0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x i32> @llvm.vector.insert.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 1 x i32> %subvec, i64 1)
@@ -306,7 +306,7 @@ define <vscale x 16 x i8> @insert_nxv16i8_nxv1i8_7(<vscale x 16 x i8> %vec, <vsc
   %v = call <vscale x 16 x i8> @llvm.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 7)
@@ -319,7 +319,7 @@ define <vscale x 16 x i8> @insert_nxv16i8_nxv1i8_15(<vscale x 16 x i8> %vec, <vs
   %v = call <vscale x 16 x i8> @llvm.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 15)
@@ -344,7 +344,7 @@ define <vscale x 32 x half> @insert_nxv32f16_nxv2f16_2(<vscale x 32 x half> %vec
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 2
 ; CHECK-NEXT:    add a1, a0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v16, a0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vector.insert.nxv2f16.nxv32f16(<vscale x 32 x half> %vec, <vscale x 2 x half> %subvec, i64 2)
@@ -357,7 +357,7 @@ define <vscale x 32 x half> @insert_nxv32f16_nxv2f16_26(<vscale x 32 x half> %ve
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 2
 ; CHECK-NEXT:    add a1, a0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vx v14, v16, a0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vector.insert.nxv2f16.nxv32f16(<vscale x 32 x half> %vec, <vscale x 2 x half> %subvec, i64 26)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
index 515d77109af9f..6d42b15273cf8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
@@ -916,7 +916,7 @@ define half @vreduce_ord_fadd_nxv6f16(<vscale x 6 x half> %v, half %s) {
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 2
 ; CHECK-NEXT:    add a1, a0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vx v9, v10, a0
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v10, fa0
@@ -938,11 +938,11 @@ define half @vreduce_ord_fadd_nxv10f16(<vscale x 10 x half> %v, half %s) {
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 2
 ; CHECK-NEXT:    add a1, a0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vx v10, v12, a0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
 ; CHECK-NEXT:    vmv.v.v v11, v12
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vx v11, v12, a0
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v12, fa0
@@ -1002,7 +1002,7 @@ define half @vreduce_fadd_nxv6f16(<vscale x 6 x half> %v, half %s) {
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 2
 ; CHECK-NEXT:    add a1, a0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vx v9, v10, a0
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v10, fa0
@@ -1025,11 +1025,11 @@ define half @vreduce_fmin_nxv10f16(<vscale x 10 x half> %v) {
 ; CHECK-NEXT:    vlse16.v v12, (a1), zero
 ; CHECK-NEXT:    srli a0, a0, 2
 ; CHECK-NEXT:    add a1, a0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vx v10, v12, a0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
 ; CHECK-NEXT:    vmv.v.v v11, v12
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vx v11, v12, a0
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v8, v8
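
The gist of the one-line functional change: lowerINSERT_SUBVECTOR performs the insert with a vslideup on a single register group of type InterSubVT, so the tail governed by the vsetvli policy is InterSubVT's tail, not the full vector's (VecVT). When the inserted subvector runs to the end of that register group, no live element sits past VL, and the cheaper tail-agnostic encoding is equivalent. The sketch below models that decision in plain C++; ScalableCount, choosePolicy, and main are hypothetical scaffolding for illustration, not the LLVM API.

#include <cassert>

// Element count of a scalable vector type, in multiples of vscale.
struct ScalableCount {
  unsigned MinVal;
  bool operator==(ScalableCount Other) const { return MinVal == Other.MinVal; }
};

enum class TailPolicy { UndisturbedMaskUndisturbed, Agnostic };

// RemIdx:     element offset of the insert within its register group
// SubVec:     element count of the subvector being inserted
// InterSubVT: element count of the register-group type the vslideup
//             actually writes
// VecVT:      element count of the full destination vector
TailPolicy choosePolicy(unsigned RemIdx, ScalableCount SubVec,
                        ScalableCount InterSubVT, ScalableCount VecVT) {
  ScalableCount EndIndex{RemIdx + SubVec.MinVal};
  // The vslideup touches one InterSubVT-sized register group, so if the
  // insert runs to the end of that group, tail-agnostic is safe.
  if (EndIndex == InterSubVT)
    return TailPolicy::Agnostic;
  // The old code compared EndIndex against VecVT instead, which only
  // matched inserts ending at the very end of the whole vector; inserts
  // filling an intermediate register group kept a needlessly strict "tu".
  (void)VecVT;
  return TailPolicy::UndisturbedMaskUndisturbed;
}

int main() {
  // insert_nxv16i32_nxv1i32_1 from the test diff: RemIdx = 1 (x vscale),
  // SubVec = nxv1i32, InterSubVT = nxv2i32, VecVT = nxv16i32. The insert
  // fills its register group, so the patch flips this case to "ta".
  assert(choosePolicy(1, {1}, {2}, {16}) == TailPolicy::Agnostic);
  // Inserting nxv1i8 at index 1 of an nxv16i8: EndIndex = 2 (x vscale)
  // falls short of the nxv8i8 group, so live tail elements force "tu".
  assert(choosePolicy(1, {1}, {8}, {16}) ==
         TailPolicy::UndisturbedMaskUndisturbed);
  return 0;
}

That choice is what every test hunk reflects: the vslideup's vsetvli switches from tu to ta, which spares the hardware from preserving tail elements that are never read back.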