From 8f320df5ad6c893692d93f001e3a5e86b532423b Mon Sep 17 00:00:00 2001 From: Luke Lau Date: Tue, 26 Sep 2023 12:57:46 +0100 Subject: [PATCH 1/5] [RISCV] Handle .vx/.vi pseudos in hasAllNBitUsers Vector pseudos with scalar operands only use the lower SEW bits (or less in the case of shifts and clips). This patch accounts for this in hasAllNBitUsers for both SDNodes in RISCVISelDAGToDAG. We also need to handle this in RISCVOptWInstrs otherwise we introduce slliw instructions that are less compressible than their original slli counterpart. --- llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp | 171 +++ llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp | 164 +++ .../CodeGen/RISCV/rvv/constant-folding.ll | 32 +- .../rvv/fixed-vectors-deinterleave-load.ll | 93 +- .../rvv/fixed-vectors-reduction-int-vp.ll | 12 +- .../RISCV/rvv/named-vector-shuffle-reverse.ll | 1112 +++++++++-------- llvm/test/CodeGen/RISCV/rvv/sshl_sat_vec.ll | 12 +- .../RISCV/rvv/vector-deinterleave-fixed.ll | 84 +- llvm/test/CodeGen/RISCV/rvv/vmulh-sdnode.ll | 87 +- llvm/test/CodeGen/RISCV/rvv/vmulhu-sdnode.ll | 84 +- .../CodeGen/RISCV/rvv/vreductions-int-vp.ll | 21 +- 11 files changed, 1088 insertions(+), 784 deletions(-) diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp index 140473c595bbe..6925ab557e477 100644 --- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp @@ -2752,6 +2752,175 @@ bool RISCVDAGToDAGISel::selectSHXADD_UWOp(SDValue N, unsigned ShAmt, return false; } +static bool vectorPseudoHasAllNBitUsers(SDNode *User, unsigned UserOpNo, + unsigned Bits, + const TargetInstrInfo *TII) { + const RISCVVPseudosTable::PseudoInfo *PseudoInfo = + RISCVVPseudosTable::getPseudoInfo(User->getMachineOpcode()); + + if (!PseudoInfo) + return false; + + const MCInstrDesc &MCID = TII->get(User->getMachineOpcode()); + const uint64_t TSFlags = MCID.TSFlags; + if (!RISCVII::hasSEWOp(TSFlags)) + return false; + assert(RISCVII::hasVLOp(TSFlags)); + + bool HasGlueOp = User->getGluedNode() != nullptr; + unsigned ChainOpIdx = User->getNumOperands() - HasGlueOp - 1; + bool HasChainOp = User->getOperand(ChainOpIdx).getValueType() == MVT::Other; + bool HasVecPolicyOp = RISCVII::hasVecPolicyOp(TSFlags); + unsigned VLIdx = + User->getNumOperands() - HasVecPolicyOp - HasChainOp - HasGlueOp - 2; + const unsigned Log2SEW = User->getConstantOperandVal(VLIdx + 1); + + // TODO: The Largest VL 65,536 occurs for LMUL=8 and SEW=8 with + // VLEN=65,536. We could check if Bits < 16 here. + if (UserOpNo == VLIdx) + return false; + + // TODO: Handle Zvbb instructions + switch (PseudoInfo->BaseInstr) { + default: + return false; + + // 11.6. Vector Single-Width Shift Instructions + case RISCV::VSLL_VX: + case RISCV::VSLL_VI: + case RISCV::VSRL_VX: + case RISCV::VSRL_VI: + case RISCV::VSRA_VX: + case RISCV::VSRA_VI: + // 12.4. Vector Single-Width Scaling Shift Instructions + case RISCV::VSSRL_VX: + case RISCV::VSSRL_VI: + case RISCV::VSSRA_VX: + case RISCV::VSSRA_VI: + // Only the low lg2(SEW) bits of the shift-amount value are used. + if (Bits < Log2SEW) + return false; + break; + + // 11.7 Vector Narrowing Integer Right Shift Instructions + case RISCV::VNSRL_WX: + case RISCV::VNSRL_WI: + case RISCV::VNSRA_WX: + case RISCV::VNSRA_WI: + // 12.5. Vector Narrowing Fixed-Point Clip Instructions + case RISCV::VNCLIPU_WX: + case RISCV::VNCLIPU_WI: + case RISCV::VNCLIP_WX: + case RISCV::VNCLIP_WI: + // Only the low lg2(2*SEW) bits of the shift-amount value are used. 
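+    // For example, a narrowing shift or clip with a destination SEW of 32
+    // reads its shift amount from the low log2(2*32) = 6 bits of the scalar,
+    // so a producer only needs Log2SEW + 1 correct low bits.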
+ if (Bits < Log2SEW + 1) + return false; + break; + + // 11.1. Vector Single-Width Integer Add and Subtract + case RISCV::VADD_VX: + case RISCV::VADD_VI: + case RISCV::VSUB_VX: + case RISCV::VRSUB_VX: + case RISCV::VRSUB_VI: + // 11.2. Vector Widening Integer Add/Subtract + case RISCV::VWADDU_VX: + case RISCV::VWSUBU_VX: + case RISCV::VWADD_VX: + case RISCV::VWSUB_VX: + case RISCV::VWADDU_WX: + case RISCV::VWSUBU_WX: + case RISCV::VWADD_WX: + case RISCV::VWSUB_WX: + // 11.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions + case RISCV::VADC_VXM: + case RISCV::VADC_VIM: + case RISCV::VMADC_VXM: + case RISCV::VMADC_VIM: + case RISCV::VMADC_VX: + case RISCV::VMADC_VI: + case RISCV::VSBC_VXM: + case RISCV::VMSBC_VXM: + case RISCV::VMSBC_VX: + // 11.5 Vector Bitwise Logical Instructions + case RISCV::VAND_VX: + case RISCV::VAND_VI: + case RISCV::VOR_VX: + case RISCV::VOR_VI: + case RISCV::VXOR_VX: + case RISCV::VXOR_VI: + // 11.8. Vector Integer Compare Instructions + case RISCV::VMSEQ_VX: + case RISCV::VMSEQ_VI: + case RISCV::VMSNE_VX: + case RISCV::VMSNE_VI: + case RISCV::VMSLTU_VX: + case RISCV::VMSLT_VX: + case RISCV::VMSLEU_VX: + case RISCV::VMSLEU_VI: + case RISCV::VMSLE_VX: + case RISCV::VMSLE_VI: + case RISCV::VMSGTU_VX: + case RISCV::VMSGTU_VI: + case RISCV::VMSGT_VX: + case RISCV::VMSGT_VI: + // 11.9. Vector Integer Min/Max Instructions + case RISCV::VMINU_VX: + case RISCV::VMIN_VX: + case RISCV::VMAXU_VX: + case RISCV::VMAX_VX: + // 11.10. Vector Single-Width Integer Multiply Instructions + case RISCV::VMUL_VX: + case RISCV::VMULH_VX: + case RISCV::VMULHU_VX: + case RISCV::VMULHSU_VX: + // 11.11. Vector Integer Divide Instructions + case RISCV::VDIVU_VX: + case RISCV::VDIV_VX: + case RISCV::VREMU_VX: + case RISCV::VREM_VX: + // 11.12. Vector Widening Integer Multiply Instructions + case RISCV::VWMUL_VX: + case RISCV::VWMULU_VX: + case RISCV::VWMULSU_VX: + // 11.13. Vector Single-Width Integer Multiply-Add Instructions + case RISCV::VMACC_VX: + case RISCV::VNMSAC_VX: + case RISCV::VMADD_VX: + case RISCV::VNMSUB_VX: + // 11.14. Vector Widening Integer Multiply-Add Instructions + case RISCV::VWMACCU_VX: + case RISCV::VWMACC_VX: + case RISCV::VWMACCSU_VX: + case RISCV::VWMACCUS_VX: + // 11.15. Vector Integer Merge Instructions + case RISCV::VMERGE_VXM: + case RISCV::VMERGE_VIM: + // 11.16. Vector Integer Move Instructions + case RISCV::VMV_V_X: + case RISCV::VMV_V_I: + // 12.1. Vector Single-Width Saturating Add and Subtract + case RISCV::VSADDU_VX: + case RISCV::VSADDU_VI: + case RISCV::VSADD_VX: + case RISCV::VSADD_VI: + case RISCV::VSSUBU_VX: + case RISCV::VSSUB_VX: + // 12.2. Vector Single-Width Averaging Add and Subtract + case RISCV::VAADDU_VX: + case RISCV::VAADD_VX: + case RISCV::VASUBU_VX: + case RISCV::VASUB_VX: + // 12.3. Vector Single-Width Fractional Multiply with Rounding and Saturation + case RISCV::VSMUL_VX: + // 16.1. Integer Scalar Move Instructions + case RISCV::VMV_S_X: + if (Bits < (1 << Log2SEW)) + return false; + } + return true; +} + // Return true if all users of this SDNode* only consume the lower \p Bits. // This can be used to form W instructions for add/sub/mul/shl even when the // root isn't a sext_inreg. This can allow the ADDW/SUBW/MULW/SLLIW to CSE if @@ -2783,6 +2952,8 @@ bool RISCVDAGToDAGISel::hasAllNBitUsers(SDNode *Node, unsigned Bits, // TODO: Add more opcodes? 
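+    // Scalar operands of vector pseudos are not listed in the cases below;
+    // the default case defers to vectorPseudoHasAllNBitUsers above, which
+    // uses the pseudo's SEW operand to decide how many bits are demanded.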
switch (User->getMachineOpcode()) { default: + if (vectorPseudoHasAllNBitUsers(User, UI.getOperandNo(), Bits, TII)) + break; return false; case RISCV::ADDW: case RISCV::ADDIW: diff --git a/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp b/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp index bd294c669735f..f6353aa723c2f 100644 --- a/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp +++ b/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp @@ -77,6 +77,168 @@ FunctionPass *llvm::createRISCVOptWInstrsPass() { return new RISCVOptWInstrs(); } +static bool vectorPseudoHasAllNBitUsers(const MachineOperand &UserOp, + unsigned Bits) { + const MachineInstr &MI = *UserOp.getParent(); + const RISCVVPseudosTable::PseudoInfo *PseudoInfo = + RISCVVPseudosTable::getPseudoInfo(MI.getOpcode()); + + if (!PseudoInfo) + return false; + + const MCInstrDesc &MCID = MI.getDesc(); + const uint64_t TSFlags = MI.getDesc().TSFlags; + if (!RISCVII::hasSEWOp(TSFlags)) + return false; + assert(RISCVII::hasVLOp(TSFlags)); + const unsigned Log2SEW = MI.getOperand(RISCVII::getSEWOpNum(MCID)).getImm(); + + // TODO: The Largest VL 65,536 occurs for LMUL=8 and SEW=8 with + // VLEN=65,536. We could check if Bits < 16 here. + if (UserOp.getOperandNo() == RISCVII::getVLOpNum(MCID)) + return false; + + // TODO: Handle Zvbb instructions + switch (PseudoInfo->BaseInstr) { + default: + return false; + + // 11.6. Vector Single-Width Shift Instructions + case RISCV::VSLL_VX: + case RISCV::VSLL_VI: + case RISCV::VSRL_VX: + case RISCV::VSRL_VI: + case RISCV::VSRA_VX: + case RISCV::VSRA_VI: + // 12.4. Vector Single-Width Scaling Shift Instructions + case RISCV::VSSRL_VX: + case RISCV::VSSRL_VI: + case RISCV::VSSRA_VX: + case RISCV::VSSRA_VI: + // Only the low lg2(SEW) bits of the shift-amount value are used. + if (Bits < Log2SEW) + return false; + break; + + // 11.7 Vector Narrowing Integer Right Shift Instructions + case RISCV::VNSRL_WX: + case RISCV::VNSRL_WI: + case RISCV::VNSRA_WX: + case RISCV::VNSRA_WI: + // 12.5. Vector Narrowing Fixed-Point Clip Instructions + case RISCV::VNCLIPU_WX: + case RISCV::VNCLIPU_WI: + case RISCV::VNCLIP_WX: + case RISCV::VNCLIP_WI: + // Only the low lg2(2*SEW) bits of the shift-amount value are used. + if (Bits < Log2SEW + 1) + return false; + break; + + // 11.1. Vector Single-Width Integer Add and Subtract + case RISCV::VADD_VX: + case RISCV::VADD_VI: + case RISCV::VSUB_VX: + case RISCV::VRSUB_VX: + case RISCV::VRSUB_VI: + // 11.2. Vector Widening Integer Add/Subtract + case RISCV::VWADDU_VX: + case RISCV::VWSUBU_VX: + case RISCV::VWADD_VX: + case RISCV::VWSUB_VX: + case RISCV::VWADDU_WX: + case RISCV::VWSUBU_WX: + case RISCV::VWADD_WX: + case RISCV::VWSUB_WX: + // 11.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions + case RISCV::VADC_VXM: + case RISCV::VADC_VIM: + case RISCV::VMADC_VXM: + case RISCV::VMADC_VIM: + case RISCV::VMADC_VX: + case RISCV::VMADC_VI: + case RISCV::VSBC_VXM: + case RISCV::VMSBC_VXM: + case RISCV::VMSBC_VX: + // 11.5 Vector Bitwise Logical Instructions + case RISCV::VAND_VX: + case RISCV::VAND_VI: + case RISCV::VOR_VX: + case RISCV::VOR_VI: + case RISCV::VXOR_VX: + case RISCV::VXOR_VI: + // 11.8. Vector Integer Compare Instructions + case RISCV::VMSEQ_VX: + case RISCV::VMSEQ_VI: + case RISCV::VMSNE_VX: + case RISCV::VMSNE_VI: + case RISCV::VMSLTU_VX: + case RISCV::VMSLT_VX: + case RISCV::VMSLEU_VX: + case RISCV::VMSLEU_VI: + case RISCV::VMSLE_VX: + case RISCV::VMSLE_VI: + case RISCV::VMSGTU_VX: + case RISCV::VMSGTU_VI: + case RISCV::VMSGT_VX: + case RISCV::VMSGT_VI: + // 11.9. 
Vector Integer Min/Max Instructions + case RISCV::VMINU_VX: + case RISCV::VMIN_VX: + case RISCV::VMAXU_VX: + case RISCV::VMAX_VX: + // 11.10. Vector Single-Width Integer Multiply Instructions + case RISCV::VMUL_VX: + case RISCV::VMULH_VX: + case RISCV::VMULHU_VX: + case RISCV::VMULHSU_VX: + // 11.11. Vector Integer Divide Instructions + case RISCV::VDIVU_VX: + case RISCV::VDIV_VX: + case RISCV::VREMU_VX: + case RISCV::VREM_VX: + // 11.12. Vector Widening Integer Multiply Instructions + case RISCV::VWMUL_VX: + case RISCV::VWMULU_VX: + case RISCV::VWMULSU_VX: + // 11.13. Vector Single-Width Integer Multiply-Add Instructions + case RISCV::VMACC_VX: + case RISCV::VNMSAC_VX: + case RISCV::VMADD_VX: + case RISCV::VNMSUB_VX: + // 11.14. Vector Widening Integer Multiply-Add Instructions + case RISCV::VWMACCU_VX: + case RISCV::VWMACC_VX: + case RISCV::VWMACCSU_VX: + case RISCV::VWMACCUS_VX: + // 11.15. Vector Integer Merge Instructions + case RISCV::VMERGE_VXM: + case RISCV::VMERGE_VIM: + // 11.16. Vector Integer Move Instructions + case RISCV::VMV_V_X: + case RISCV::VMV_V_I: + // 12.1. Vector Single-Width Saturating Add and Subtract + case RISCV::VSADDU_VX: + case RISCV::VSADDU_VI: + case RISCV::VSADD_VX: + case RISCV::VSADD_VI: + case RISCV::VSSUBU_VX: + case RISCV::VSSUB_VX: + // 12.2. Vector Single-Width Averaging Add and Subtract + case RISCV::VAADDU_VX: + case RISCV::VAADD_VX: + case RISCV::VASUBU_VX: + case RISCV::VASUB_VX: + // 12.3. Vector Single-Width Fractional Multiply with Rounding and Saturation + case RISCV::VSMUL_VX: + // 16.1. Integer Scalar Move Instructions + case RISCV::VMV_S_X: + if (Bits < (1 << Log2SEW)) + return false; + } + return true; +} + // Checks if all users only demand the lower \p OrigBits of the original // instruction's result. // TODO: handle multiple interdependent transformations @@ -107,6 +269,8 @@ static bool hasAllNBitUsers(const MachineInstr &OrigMI, switch (UserMI->getOpcode()) { default: + if (vectorPseudoHasAllNBitUsers(UserOp, Bits)) + break; return false; case RISCV::ADDIW: diff --git a/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll b/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll index e3a878052ee19..98bc4081b3a34 100644 --- a/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll +++ b/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll @@ -14,26 +14,15 @@ ; a constant SPLAT_VECTOR didn't follow suit. 
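+; Now that only the low 32 bits of the vand.vx scalar are demanded, RV64 can
+; materialize the 0xFFFF8000 mask with the same sign-extended lui as RV32, so
+; both targets share a single CHECK prefix below.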
define <2 x i16> @fixedlen(<2 x i32> %x) { -; RV32-LABEL: fixedlen: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma -; RV32-NEXT: vsrl.vi v8, v8, 16 -; RV32-NEXT: lui a0, 1048568 -; RV32-NEXT: vand.vx v8, v8, a0 -; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, ma -; RV32-NEXT: vnsrl.wi v8, v8, 0 -; RV32-NEXT: ret -; -; RV64-LABEL: fixedlen: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma -; RV64-NEXT: vsrl.vi v8, v8, 16 -; RV64-NEXT: lui a0, 131071 -; RV64-NEXT: slli a0, a0, 3 -; RV64-NEXT: vand.vx v8, v8, a0 -; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, ma -; RV64-NEXT: vnsrl.wi v8, v8, 0 -; RV64-NEXT: ret +; CHECK-LABEL: fixedlen: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; CHECK-NEXT: vsrl.vi v8, v8, 16 +; CHECK-NEXT: lui a0, 1048568 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma +; CHECK-NEXT: vnsrl.wi v8, v8, 0 +; CHECK-NEXT: ret %v41 = insertelement <2 x i32> poison, i32 16, i32 0 %v42 = shufflevector <2 x i32> %v41, <2 x i32> poison, <2 x i32> zeroinitializer %v43 = lshr <2 x i32> %x, %v42 @@ -63,3 +52,6 @@ define @scalable( %x) { %v48 = and %v44, %v47 ret %v48 } +;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: +; RV32: {{.*}} +; RV64: {{.*}} diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-deinterleave-load.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-deinterleave-load.ll index 3167bcf26837b..8e298d962edf1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-deinterleave-load.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-deinterleave-load.ll @@ -5,67 +5,35 @@ ; Integers define {<16 x i1>, <16 x i1>} @vector_deinterleave_load_v16i1_v32i1(ptr %p) { -; RV32-LABEL: vector_deinterleave_load_v16i1_v32i1: -; RV32: # %bb.0: -; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; RV32-NEXT: vlm.v v0, (a0) -; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma -; RV32-NEXT: vmv.v.i v8, 0 -; RV32-NEXT: vmerge.vim v10, v8, 1, v0 -; RV32-NEXT: vid.v v9 -; RV32-NEXT: vadd.vv v11, v9, v9 -; RV32-NEXT: vrgather.vv v9, v10, v11 -; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; RV32-NEXT: vslidedown.vi v0, v0, 2 -; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma -; RV32-NEXT: vmerge.vim v8, v8, 1, v0 -; RV32-NEXT: vadd.vi v12, v11, -16 -; RV32-NEXT: lui a0, 16 -; RV32-NEXT: addi a0, a0, -256 -; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, ma -; RV32-NEXT: vmv.s.x v0, a0 -; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu -; RV32-NEXT: vrgather.vv v9, v8, v12, v0.t -; RV32-NEXT: vmsne.vi v9, v9, 0 -; RV32-NEXT: vadd.vi v12, v11, 1 -; RV32-NEXT: vrgather.vv v13, v10, v12 -; RV32-NEXT: vadd.vi v10, v11, -15 -; RV32-NEXT: vrgather.vv v13, v8, v10, v0.t -; RV32-NEXT: vmsne.vi v8, v13, 0 -; RV32-NEXT: vmv.v.v v0, v9 -; RV32-NEXT: ret -; -; RV64-LABEL: vector_deinterleave_load_v16i1_v32i1: -; RV64: # %bb.0: -; RV64-NEXT: li a1, 32 -; RV64-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; RV64-NEXT: vlm.v v0, (a0) -; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma -; RV64-NEXT: vmv.v.i v8, 0 -; RV64-NEXT: vmerge.vim v10, v8, 1, v0 -; RV64-NEXT: vid.v v9 -; RV64-NEXT: vadd.vv v11, v9, v9 -; RV64-NEXT: vrgather.vv v9, v10, v11 -; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; RV64-NEXT: vslidedown.vi v0, v0, 2 -; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma -; RV64-NEXT: vmerge.vim v8, v8, 1, v0 -; RV64-NEXT: vadd.vi v12, v11, -16 -; RV64-NEXT: lui a0, 16 -; RV64-NEXT: addiw a0, a0, -256 -; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, ma -; 
RV64-NEXT: vmv.s.x v0, a0 -; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu -; RV64-NEXT: vrgather.vv v9, v8, v12, v0.t -; RV64-NEXT: vmsne.vi v9, v9, 0 -; RV64-NEXT: vadd.vi v12, v11, 1 -; RV64-NEXT: vrgather.vv v13, v10, v12 -; RV64-NEXT: vadd.vi v10, v11, -15 -; RV64-NEXT: vrgather.vv v13, v8, v10, v0.t -; RV64-NEXT: vmsne.vi v8, v13, 0 -; RV64-NEXT: vmv.v.v v0, v9 -; RV64-NEXT: ret +; CHECK-LABEL: vector_deinterleave_load_v16i1_v32i1: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 32 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vlm.v v0, (a0) +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma +; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: vmerge.vim v10, v8, 1, v0 +; CHECK-NEXT: vid.v v9 +; CHECK-NEXT: vadd.vv v11, v9, v9 +; CHECK-NEXT: vrgather.vv v9, v10, v11 +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; CHECK-NEXT: vslidedown.vi v0, v0, 2 +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma +; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: vadd.vi v12, v11, -16 +; CHECK-NEXT: li a0, -256 +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma +; CHECK-NEXT: vmv.s.x v0, a0 +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vrgather.vv v9, v8, v12, v0.t +; CHECK-NEXT: vmsne.vi v9, v9, 0 +; CHECK-NEXT: vadd.vi v12, v11, 1 +; CHECK-NEXT: vrgather.vv v13, v10, v12 +; CHECK-NEXT: vadd.vi v10, v11, -15 +; CHECK-NEXT: vrgather.vv v13, v8, v10, v0.t +; CHECK-NEXT: vmsne.vi v8, v13, 0 +; CHECK-NEXT: vmv.v.v v0, v9 +; CHECK-NEXT: ret %vec = load <32 x i1>, ptr %p %retval = call {<16 x i1>, <16 x i1>} @llvm.experimental.vector.deinterleave2.v32i1(<32 x i1> %vec) ret {<16 x i1>, <16 x i1>} %retval @@ -211,3 +179,6 @@ declare {<2 x float>, <2 x float>} @llvm.experimental.vector.deinterleave2.v4f32 declare {<8 x half>, <8 x half>} @llvm.experimental.vector.deinterleave2.v16f16(<16 x half>) declare {<4 x float>, <4 x float>} @llvm.experimental.vector.deinterleave2.v8f32(<8 x float>) declare {<2 x double>, <2 x double>} @llvm.experimental.vector.deinterleave2.v4f64(<4 x double>) +;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: +; RV32: {{.*}} +; RV64: {{.*}} diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll index f0a359c13ce5d..742002bda8a9e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll @@ -585,8 +585,7 @@ define signext i32 @vpreduce_umax_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> % ; ; RV64-LABEL: vpreduce_umax_v2i32: ; RV64: # %bb.0: -; RV64-NEXT: slli a0, a0, 32 -; RV64-NEXT: srli a0, a0, 32 +; RV64-NEXT: andi a0, a0, -1 ; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e32, mf2, ta, ma @@ -626,8 +625,7 @@ define signext i32 @vpreduce_umin_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> % ; ; RV64-LABEL: vpreduce_umin_v2i32: ; RV64: # %bb.0: -; RV64-NEXT: slli a0, a0, 32 -; RV64-NEXT: srli a0, a0, 32 +; RV64-NEXT: andi a0, a0, -1 ; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e32, mf2, ta, ma @@ -727,8 +725,7 @@ define signext i32 @vpreduce_umax_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> % ; ; RV64-LABEL: vpreduce_umax_v4i32: ; RV64: # %bb.0: -; RV64-NEXT: slli a0, a0, 32 -; RV64-NEXT: srli a0, a0, 32 +; RV64-NEXT: andi a0, a0, -1 ; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, ma @@ -768,8 +765,7 @@ define signext i32 @vpreduce_umin_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> % ; ; RV64-LABEL: vpreduce_umin_v4i32: ; RV64: # %bb.0: -; RV64-NEXT: slli a0, a0, 32 -; RV64-NEXT: srli a0, a0, 32 +; RV64-NEXT: andi a0, a0, -1 ; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, ma diff --git a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll index be8281181cd2d..82c9f405c2392 100644 --- a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll +++ b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll @@ -1,10 +1,10 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32-BITS-UNKNOWN -; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-max=256 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32-BITS-256 -; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-max=512 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32-BITS-512 -; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64-BITS-UNKNOWN -; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-max=256 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64-BITS-256 -; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-max=512 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64-BITS-512 +; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32-BITS-UNKNOWN +; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-max=256 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32-BITS-256 +; RUN: llc 
-mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-max=512 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32-BITS-512 +; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64-BITS-UNKNOWN +; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-max=256 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64-BITS-256 +; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-max=512 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64-BITS-512 ; ; VECTOR_REVERSE - masks @@ -65,7 +65,7 @@ define @reverse_nxv2i1( %a) { ; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV64-BITS-UNKNOWN-NEXT: srli a0, a0, 2 -; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 +; RV64-BITS-UNKNOWN-NEXT: addiw a0, a0, -1 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v9 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v9, v9, a0 @@ -82,7 +82,7 @@ define @reverse_nxv2i1( %a) { ; RV64-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-256-NEXT: csrr a0, vlenb ; RV64-BITS-256-NEXT: srli a0, a0, 2 -; RV64-BITS-256-NEXT: addi a0, a0, -1 +; RV64-BITS-256-NEXT: addiw a0, a0, -1 ; RV64-BITS-256-NEXT: vid.v v9 ; RV64-BITS-256-NEXT: vrsub.vx v9, v9, a0 ; RV64-BITS-256-NEXT: vrgather.vv v10, v8, v9 @@ -97,7 +97,7 @@ define @reverse_nxv2i1( %a) { ; RV64-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-512-NEXT: csrr a0, vlenb ; RV64-BITS-512-NEXT: srli a0, a0, 2 -; RV64-BITS-512-NEXT: addi a0, a0, -1 +; RV64-BITS-512-NEXT: addiw a0, a0, -1 ; RV64-BITS-512-NEXT: vid.v v9 ; RV64-BITS-512-NEXT: vrsub.vx v9, v9, a0 ; RV64-BITS-512-NEXT: vrgather.vv v10, v8, v9 @@ -163,7 +163,7 @@ define @reverse_nxv4i1( %a) { ; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV64-BITS-UNKNOWN-NEXT: srli a0, a0, 1 -; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 +; RV64-BITS-UNKNOWN-NEXT: addiw a0, a0, -1 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v9 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v9, v9, a0 @@ -180,7 +180,7 @@ define @reverse_nxv4i1( %a) { ; RV64-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-256-NEXT: csrr a0, vlenb ; RV64-BITS-256-NEXT: srli a0, a0, 1 -; RV64-BITS-256-NEXT: addi a0, a0, -1 +; RV64-BITS-256-NEXT: addiw a0, a0, -1 ; RV64-BITS-256-NEXT: vid.v v9 ; RV64-BITS-256-NEXT: vrsub.vx v9, v9, a0 ; RV64-BITS-256-NEXT: vrgather.vv v10, v8, v9 @@ -195,7 +195,7 @@ define @reverse_nxv4i1( %a) { ; RV64-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-512-NEXT: csrr a0, vlenb ; RV64-BITS-512-NEXT: srli a0, a0, 1 -; RV64-BITS-512-NEXT: addi a0, a0, -1 +; RV64-BITS-512-NEXT: addiw a0, a0, -1 ; RV64-BITS-512-NEXT: vid.v v9 ; RV64-BITS-512-NEXT: vrsub.vx v9, v9, a0 ; RV64-BITS-512-NEXT: vrgather.vv v10, v8, v9 @@ -257,7 +257,7 @@ define @reverse_nxv8i1( %a) { ; RV64-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0 ; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb -; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 +; RV64-BITS-UNKNOWN-NEXT: addiw a0, a0, -1 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v10 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v10, v10, a0 @@ -273,7 +273,7 @@ define @reverse_nxv8i1( %a) { ; RV64-BITS-256-NEXT: vmv.v.i v8, 0 ; RV64-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-256-NEXT: csrr a0, 
vlenb -; RV64-BITS-256-NEXT: addi a0, a0, -1 +; RV64-BITS-256-NEXT: addiw a0, a0, -1 ; RV64-BITS-256-NEXT: vid.v v9 ; RV64-BITS-256-NEXT: vrsub.vx v9, v9, a0 ; RV64-BITS-256-NEXT: vrgather.vv v10, v8, v9 @@ -287,7 +287,7 @@ define @reverse_nxv8i1( %a) { ; RV64-BITS-512-NEXT: vmv.v.i v8, 0 ; RV64-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-512-NEXT: csrr a0, vlenb -; RV64-BITS-512-NEXT: addi a0, a0, -1 +; RV64-BITS-512-NEXT: addiw a0, a0, -1 ; RV64-BITS-512-NEXT: vid.v v9 ; RV64-BITS-512-NEXT: vrsub.vx v9, v9, a0 ; RV64-BITS-512-NEXT: vrgather.vv v10, v8, v9 @@ -353,7 +353,7 @@ define @reverse_nxv16i1( %a) { ; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV64-BITS-UNKNOWN-NEXT: slli a0, a0, 1 -; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 +; RV64-BITS-UNKNOWN-NEXT: addiw a0, a0, -1 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m4, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v12 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v12, v12, a0 @@ -370,7 +370,7 @@ define @reverse_nxv16i1( %a) { ; RV64-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-256-NEXT: csrr a0, vlenb ; RV64-BITS-256-NEXT: slli a0, a0, 1 -; RV64-BITS-256-NEXT: addi a0, a0, -1 +; RV64-BITS-256-NEXT: addiw a0, a0, -1 ; RV64-BITS-256-NEXT: vid.v v10 ; RV64-BITS-256-NEXT: vrsub.vx v10, v10, a0 ; RV64-BITS-256-NEXT: vrgather.vv v12, v8, v10 @@ -385,7 +385,7 @@ define @reverse_nxv16i1( %a) { ; RV64-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-512-NEXT: csrr a0, vlenb ; RV64-BITS-512-NEXT: slli a0, a0, 1 -; RV64-BITS-512-NEXT: addi a0, a0, -1 +; RV64-BITS-512-NEXT: addiw a0, a0, -1 ; RV64-BITS-512-NEXT: vid.v v10 ; RV64-BITS-512-NEXT: vrsub.vx v10, v10, a0 ; RV64-BITS-512-NEXT: vrgather.vv v12, v8, v10 @@ -451,7 +451,7 @@ define @reverse_nxv32i1( %a) { ; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV64-BITS-UNKNOWN-NEXT: slli a0, a0, 2 -; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 +; RV64-BITS-UNKNOWN-NEXT: addiw a0, a0, -1 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m8, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v16 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v16, v16, a0 @@ -468,7 +468,7 @@ define @reverse_nxv32i1( %a) { ; RV64-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-256-NEXT: csrr a0, vlenb ; RV64-BITS-256-NEXT: slli a0, a0, 2 -; RV64-BITS-256-NEXT: addi a0, a0, -1 +; RV64-BITS-256-NEXT: addiw a0, a0, -1 ; RV64-BITS-256-NEXT: vid.v v12 ; RV64-BITS-256-NEXT: vrsub.vx v12, v12, a0 ; RV64-BITS-256-NEXT: vrgather.vv v16, v8, v12 @@ -483,7 +483,7 @@ define @reverse_nxv32i1( %a) { ; RV64-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-512-NEXT: csrr a0, vlenb ; RV64-BITS-512-NEXT: slli a0, a0, 2 -; RV64-BITS-512-NEXT: addi a0, a0, -1 +; RV64-BITS-512-NEXT: addiw a0, a0, -1 ; RV64-BITS-512-NEXT: vid.v v12 ; RV64-BITS-512-NEXT: vrsub.vx v12, v12, a0 ; RV64-BITS-512-NEXT: vrgather.vv v16, v8, v12 @@ -552,7 +552,7 @@ define @reverse_nxv64i1( %a) { ; RV64-BITS-UNKNOWN: # %bb.0: ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV64-BITS-UNKNOWN-NEXT: slli a0, a0, 2 -; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 +; RV64-BITS-UNKNOWN-NEXT: addiw a0, a0, -1 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v8 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v8, v8, a0 @@ -574,7 +574,7 @@ define @reverse_nxv64i1( %a) { ; RV64-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-256-NEXT: csrr a0, vlenb ; RV64-BITS-256-NEXT: slli a0, a0, 3 -; RV64-BITS-256-NEXT: addi a0, a0, -1 +; RV64-BITS-256-NEXT: addiw a0, a0, -1 ; 
RV64-BITS-256-NEXT: vid.v v16 ; RV64-BITS-256-NEXT: vrsub.vx v16, v16, a0 ; RV64-BITS-256-NEXT: vrgather.vv v24, v8, v16 @@ -586,7 +586,7 @@ define @reverse_nxv64i1( %a) { ; RV64-BITS-512: # %bb.0: ; RV64-BITS-512-NEXT: csrr a0, vlenb ; RV64-BITS-512-NEXT: slli a0, a0, 2 -; RV64-BITS-512-NEXT: addi a0, a0, -1 +; RV64-BITS-512-NEXT: addiw a0, a0, -1 ; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; RV64-BITS-512-NEXT: vid.v v8 ; RV64-BITS-512-NEXT: vrsub.vx v8, v8, a0 @@ -650,7 +650,7 @@ define @reverse_nxv1i8( %a) { ; RV64-BITS-UNKNOWN: # %bb.0: ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV64-BITS-UNKNOWN-NEXT: srli a0, a0, 3 -; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 +; RV64-BITS-UNKNOWN-NEXT: addiw a0, a0, -1 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v9 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v10, v9, a0 @@ -663,7 +663,7 @@ define @reverse_nxv1i8( %a) { ; RV64-BITS-256: # %bb.0: ; RV64-BITS-256-NEXT: csrr a0, vlenb ; RV64-BITS-256-NEXT: srli a0, a0, 3 -; RV64-BITS-256-NEXT: addi a0, a0, -1 +; RV64-BITS-256-NEXT: addiw a0, a0, -1 ; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; RV64-BITS-256-NEXT: vid.v v9 ; RV64-BITS-256-NEXT: vrsub.vx v10, v9, a0 @@ -675,7 +675,7 @@ define @reverse_nxv1i8( %a) { ; RV64-BITS-512: # %bb.0: ; RV64-BITS-512-NEXT: csrr a0, vlenb ; RV64-BITS-512-NEXT: srli a0, a0, 3 -; RV64-BITS-512-NEXT: addi a0, a0, -1 +; RV64-BITS-512-NEXT: addiw a0, a0, -1 ; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; RV64-BITS-512-NEXT: vid.v v9 ; RV64-BITS-512-NEXT: vrsub.vx v10, v9, a0 @@ -728,7 +728,7 @@ define @reverse_nxv2i8( %a) { ; RV64-BITS-UNKNOWN: # %bb.0: ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV64-BITS-UNKNOWN-NEXT: srli a0, a0, 2 -; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 +; RV64-BITS-UNKNOWN-NEXT: addiw a0, a0, -1 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v9 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v10, v9, a0 @@ -741,7 +741,7 @@ define @reverse_nxv2i8( %a) { ; RV64-BITS-256: # %bb.0: ; RV64-BITS-256-NEXT: csrr a0, vlenb ; RV64-BITS-256-NEXT: srli a0, a0, 2 -; RV64-BITS-256-NEXT: addi a0, a0, -1 +; RV64-BITS-256-NEXT: addiw a0, a0, -1 ; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; RV64-BITS-256-NEXT: vid.v v9 ; RV64-BITS-256-NEXT: vrsub.vx v10, v9, a0 @@ -753,7 +753,7 @@ define @reverse_nxv2i8( %a) { ; RV64-BITS-512: # %bb.0: ; RV64-BITS-512-NEXT: csrr a0, vlenb ; RV64-BITS-512-NEXT: srli a0, a0, 2 -; RV64-BITS-512-NEXT: addi a0, a0, -1 +; RV64-BITS-512-NEXT: addiw a0, a0, -1 ; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; RV64-BITS-512-NEXT: vid.v v9 ; RV64-BITS-512-NEXT: vrsub.vx v10, v9, a0 @@ -806,7 +806,7 @@ define @reverse_nxv4i8( %a) { ; RV64-BITS-UNKNOWN: # %bb.0: ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV64-BITS-UNKNOWN-NEXT: srli a0, a0, 1 -; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 +; RV64-BITS-UNKNOWN-NEXT: addiw a0, a0, -1 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v9 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v10, v9, a0 @@ -819,7 +819,7 @@ define @reverse_nxv4i8( %a) { ; RV64-BITS-256: # %bb.0: ; RV64-BITS-256-NEXT: csrr a0, vlenb ; RV64-BITS-256-NEXT: srli a0, a0, 1 -; RV64-BITS-256-NEXT: addi a0, a0, -1 +; RV64-BITS-256-NEXT: addiw a0, a0, -1 ; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; RV64-BITS-256-NEXT: vid.v v9 ; RV64-BITS-256-NEXT: vrsub.vx v10, v9, a0 @@ -831,7 +831,7 @@ define @reverse_nxv4i8( %a) { ; RV64-BITS-512: # %bb.0: ; RV64-BITS-512-NEXT: csrr 
a0, vlenb ; RV64-BITS-512-NEXT: srli a0, a0, 1 -; RV64-BITS-512-NEXT: addi a0, a0, -1 +; RV64-BITS-512-NEXT: addiw a0, a0, -1 ; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; RV64-BITS-512-NEXT: vid.v v9 ; RV64-BITS-512-NEXT: vrsub.vx v10, v9, a0 @@ -880,7 +880,7 @@ define @reverse_nxv8i8( %a) { ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv8i8: ; RV64-BITS-UNKNOWN: # %bb.0: ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb -; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 +; RV64-BITS-UNKNOWN-NEXT: addiw a0, a0, -1 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v10 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v10, v10, a0 @@ -892,7 +892,7 @@ define @reverse_nxv8i8( %a) { ; RV64-BITS-256-LABEL: reverse_nxv8i8: ; RV64-BITS-256: # %bb.0: ; RV64-BITS-256-NEXT: csrr a0, vlenb -; RV64-BITS-256-NEXT: addi a0, a0, -1 +; RV64-BITS-256-NEXT: addiw a0, a0, -1 ; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV64-BITS-256-NEXT: vid.v v9 ; RV64-BITS-256-NEXT: vrsub.vx v10, v9, a0 @@ -903,7 +903,7 @@ define @reverse_nxv8i8( %a) { ; RV64-BITS-512-LABEL: reverse_nxv8i8: ; RV64-BITS-512: # %bb.0: ; RV64-BITS-512-NEXT: csrr a0, vlenb -; RV64-BITS-512-NEXT: addi a0, a0, -1 +; RV64-BITS-512-NEXT: addiw a0, a0, -1 ; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV64-BITS-512-NEXT: vid.v v9 ; RV64-BITS-512-NEXT: vrsub.vx v10, v9, a0 @@ -956,7 +956,7 @@ define @reverse_nxv16i8( %a) { ; RV64-BITS-UNKNOWN: # %bb.0: ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV64-BITS-UNKNOWN-NEXT: slli a0, a0, 1 -; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 +; RV64-BITS-UNKNOWN-NEXT: addiw a0, a0, -1 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v12 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v12, v12, a0 @@ -969,7 +969,7 @@ define @reverse_nxv16i8( %a) { ; RV64-BITS-256: # %bb.0: ; RV64-BITS-256-NEXT: csrr a0, vlenb ; RV64-BITS-256-NEXT: slli a0, a0, 1 -; RV64-BITS-256-NEXT: addi a0, a0, -1 +; RV64-BITS-256-NEXT: addiw a0, a0, -1 ; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; RV64-BITS-256-NEXT: vid.v v10 ; RV64-BITS-256-NEXT: vrsub.vx v12, v10, a0 @@ -981,7 +981,7 @@ define @reverse_nxv16i8( %a) { ; RV64-BITS-512: # %bb.0: ; RV64-BITS-512-NEXT: csrr a0, vlenb ; RV64-BITS-512-NEXT: slli a0, a0, 1 -; RV64-BITS-512-NEXT: addi a0, a0, -1 +; RV64-BITS-512-NEXT: addiw a0, a0, -1 ; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; RV64-BITS-512-NEXT: vid.v v10 ; RV64-BITS-512-NEXT: vrsub.vx v12, v10, a0 @@ -1034,7 +1034,7 @@ define @reverse_nxv32i8( %a) { ; RV64-BITS-UNKNOWN: # %bb.0: ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV64-BITS-UNKNOWN-NEXT: slli a0, a0, 2 -; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 +; RV64-BITS-UNKNOWN-NEXT: addiw a0, a0, -1 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v16 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v16, v16, a0 @@ -1047,7 +1047,7 @@ define @reverse_nxv32i8( %a) { ; RV64-BITS-256: # %bb.0: ; RV64-BITS-256-NEXT: csrr a0, vlenb ; RV64-BITS-256-NEXT: slli a0, a0, 2 -; RV64-BITS-256-NEXT: addi a0, a0, -1 +; RV64-BITS-256-NEXT: addiw a0, a0, -1 ; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; RV64-BITS-256-NEXT: vid.v v12 ; RV64-BITS-256-NEXT: vrsub.vx v16, v12, a0 @@ -1059,7 +1059,7 @@ define @reverse_nxv32i8( %a) { ; RV64-BITS-512: # %bb.0: ; RV64-BITS-512-NEXT: csrr a0, vlenb ; RV64-BITS-512-NEXT: slli a0, a0, 2 -; RV64-BITS-512-NEXT: addi a0, a0, -1 +; RV64-BITS-512-NEXT: addiw a0, a0, -1 ; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; RV64-BITS-512-NEXT: 
vid.v v12 ; RV64-BITS-512-NEXT: vrsub.vx v16, v12, a0 @@ -1114,7 +1114,7 @@ define @reverse_nxv64i8( %a) { ; RV64-BITS-UNKNOWN: # %bb.0: ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV64-BITS-UNKNOWN-NEXT: slli a0, a0, 2 -; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 +; RV64-BITS-UNKNOWN-NEXT: addiw a0, a0, -1 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v16 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v24, v16, a0 @@ -1128,7 +1128,7 @@ define @reverse_nxv64i8( %a) { ; RV64-BITS-256: # %bb.0: ; RV64-BITS-256-NEXT: csrr a0, vlenb ; RV64-BITS-256-NEXT: slli a0, a0, 3 -; RV64-BITS-256-NEXT: addi a0, a0, -1 +; RV64-BITS-256-NEXT: addiw a0, a0, -1 ; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; RV64-BITS-256-NEXT: vid.v v16 ; RV64-BITS-256-NEXT: vrsub.vx v24, v16, a0 @@ -1140,7 +1140,7 @@ define @reverse_nxv64i8( %a) { ; RV64-BITS-512: # %bb.0: ; RV64-BITS-512-NEXT: csrr a0, vlenb ; RV64-BITS-512-NEXT: slli a0, a0, 2 -; RV64-BITS-512-NEXT: addi a0, a0, -1 +; RV64-BITS-512-NEXT: addiw a0, a0, -1 ; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; RV64-BITS-512-NEXT: vid.v v16 ; RV64-BITS-512-NEXT: vrsub.vx v24, v16, a0 @@ -1153,175 +1153,305 @@ define @reverse_nxv64i8( %a) { } define @reverse_nxv1i16( %a) { -; CHECK-LABEL: reverse_nxv1i16: -; CHECK: # %bb.0: -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: srli a0, a0, 3 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma -; CHECK-NEXT: vid.v v9 -; CHECK-NEXT: vrsub.vx v10, v9, a0 -; CHECK-NEXT: vrgather.vv v9, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret +; RV32-LABEL: reverse_nxv1i16: +; RV32: # %bb.0: +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: srli a0, a0, 3 +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: vsetvli a1, zero, e16, mf4, ta, ma +; RV32-NEXT: vid.v v9 +; RV32-NEXT: vrsub.vx v10, v9, a0 +; RV32-NEXT: vrgather.vv v9, v8, v10 +; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: reverse_nxv1i16: +; RV64: # %bb.0: +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: srli a0, a0, 3 +; RV64-NEXT: addiw a0, a0, -1 +; RV64-NEXT: vsetvli a1, zero, e16, mf4, ta, ma +; RV64-NEXT: vid.v v9 +; RV64-NEXT: vrsub.vx v10, v9, a0 +; RV64-NEXT: vrgather.vv v9, v8, v10 +; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv1i16( %a) ret %res } define @reverse_nxv2i16( %a) { -; CHECK-LABEL: reverse_nxv2i16: -; CHECK: # %bb.0: -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: srli a0, a0, 2 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma -; CHECK-NEXT: vid.v v9 -; CHECK-NEXT: vrsub.vx v10, v9, a0 -; CHECK-NEXT: vrgather.vv v9, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret +; RV32-LABEL: reverse_nxv2i16: +; RV32: # %bb.0: +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: srli a0, a0, 2 +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; RV32-NEXT: vid.v v9 +; RV32-NEXT: vrsub.vx v10, v9, a0 +; RV32-NEXT: vrgather.vv v9, v8, v10 +; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: reverse_nxv2i16: +; RV64: # %bb.0: +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: srli a0, a0, 2 +; RV64-NEXT: addiw a0, a0, -1 +; RV64-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; RV64-NEXT: vid.v v9 +; RV64-NEXT: vrsub.vx v10, v9, a0 +; RV64-NEXT: vrgather.vv v9, v8, v10 +; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv2i16( %a) ret %res } define @reverse_nxv4i16( %a) { -; CHECK-LABEL: reverse_nxv4i16: -; CHECK: # %bb.0: -; CHECK-NEXT: csrr a0, 
vlenb -; CHECK-NEXT: srli a0, a0, 1 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma -; CHECK-NEXT: vid.v v9 -; CHECK-NEXT: vrsub.vx v10, v9, a0 -; CHECK-NEXT: vrgather.vv v9, v8, v10 -; CHECK-NEXT: vmv.v.v v8, v9 -; CHECK-NEXT: ret +; RV32-LABEL: reverse_nxv4i16: +; RV32: # %bb.0: +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: srli a0, a0, 1 +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; RV32-NEXT: vid.v v9 +; RV32-NEXT: vrsub.vx v10, v9, a0 +; RV32-NEXT: vrgather.vv v9, v8, v10 +; RV32-NEXT: vmv.v.v v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: reverse_nxv4i16: +; RV64: # %bb.0: +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: srli a0, a0, 1 +; RV64-NEXT: addiw a0, a0, -1 +; RV64-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; RV64-NEXT: vid.v v9 +; RV64-NEXT: vrsub.vx v10, v9, a0 +; RV64-NEXT: vrgather.vv v9, v8, v10 +; RV64-NEXT: vmv.v.v v8, v9 +; RV64-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv4i16( %a) ret %res } define @reverse_nxv8i16( %a) { -; CHECK-LABEL: reverse_nxv8i16: -; CHECK: # %bb.0: -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma -; CHECK-NEXT: vid.v v10 -; CHECK-NEXT: vrsub.vx v12, v10, a0 -; CHECK-NEXT: vrgather.vv v10, v8, v12 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret +; RV32-LABEL: reverse_nxv8i16: +; RV32: # %bb.0: +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: vsetvli a1, zero, e16, m2, ta, ma +; RV32-NEXT: vid.v v10 +; RV32-NEXT: vrsub.vx v12, v10, a0 +; RV32-NEXT: vrgather.vv v10, v8, v12 +; RV32-NEXT: vmv.v.v v8, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: reverse_nxv8i16: +; RV64: # %bb.0: +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: addiw a0, a0, -1 +; RV64-NEXT: vsetvli a1, zero, e16, m2, ta, ma +; RV64-NEXT: vid.v v10 +; RV64-NEXT: vrsub.vx v12, v10, a0 +; RV64-NEXT: vrgather.vv v10, v8, v12 +; RV64-NEXT: vmv.v.v v8, v10 +; RV64-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv8i16( %a) ret %res } define @reverse_nxv16i16( %a) { -; CHECK-LABEL: reverse_nxv16i16: -; CHECK: # %bb.0: -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 1 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma -; CHECK-NEXT: vid.v v12 -; CHECK-NEXT: vrsub.vx v16, v12, a0 -; CHECK-NEXT: vrgather.vv v12, v8, v16 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret +; RV32-LABEL: reverse_nxv16i16: +; RV32: # %bb.0: +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 1 +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; RV32-NEXT: vid.v v12 +; RV32-NEXT: vrsub.vx v16, v12, a0 +; RV32-NEXT: vrgather.vv v12, v8, v16 +; RV32-NEXT: vmv.v.v v8, v12 +; RV32-NEXT: ret +; +; RV64-LABEL: reverse_nxv16i16: +; RV64: # %bb.0: +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, a0, 1 +; RV64-NEXT: addiw a0, a0, -1 +; RV64-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; RV64-NEXT: vid.v v12 +; RV64-NEXT: vrsub.vx v16, v12, a0 +; RV64-NEXT: vrgather.vv v12, v8, v16 +; RV64-NEXT: vmv.v.v v8, v12 +; RV64-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv16i16( %a) ret %res } define @reverse_nxv32i16( %a) { -; CHECK-LABEL: reverse_nxv32i16: -; CHECK: # %bb.0: -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 2 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma -; CHECK-NEXT: vid.v v16 -; CHECK-NEXT: vrsub.vx v24, v16, a0 -; CHECK-NEXT: vrgather.vv v16, v8, v24 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret +; RV32-LABEL: reverse_nxv32i16: 
+; RV32: # %bb.0: +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 2 +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: vsetvli a1, zero, e16, m8, ta, ma +; RV32-NEXT: vid.v v16 +; RV32-NEXT: vrsub.vx v24, v16, a0 +; RV32-NEXT: vrgather.vv v16, v8, v24 +; RV32-NEXT: vmv.v.v v8, v16 +; RV32-NEXT: ret +; +; RV64-LABEL: reverse_nxv32i16: +; RV64: # %bb.0: +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, a0, 2 +; RV64-NEXT: addiw a0, a0, -1 +; RV64-NEXT: vsetvli a1, zero, e16, m8, ta, ma +; RV64-NEXT: vid.v v16 +; RV64-NEXT: vrsub.vx v24, v16, a0 +; RV64-NEXT: vrgather.vv v16, v8, v24 +; RV64-NEXT: vmv.v.v v8, v16 +; RV64-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv32i16( %a) ret %res } define @reverse_nxv1i32( %a) { -; CHECK-LABEL: reverse_nxv1i32: -; CHECK: # %bb.0: -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: srli a0, a0, 3 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma -; CHECK-NEXT: vid.v v9 -; CHECK-NEXT: vrsub.vx v10, v9, a0 -; CHECK-NEXT: vrgather.vv v9, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret +; RV32-LABEL: reverse_nxv1i32: +; RV32: # %bb.0: +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: srli a0, a0, 3 +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, ma +; RV32-NEXT: vid.v v9 +; RV32-NEXT: vrsub.vx v10, v9, a0 +; RV32-NEXT: vrgather.vv v9, v8, v10 +; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: reverse_nxv1i32: +; RV64: # %bb.0: +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: srli a0, a0, 3 +; RV64-NEXT: addiw a0, a0, -1 +; RV64-NEXT: vsetvli a1, zero, e32, mf2, ta, ma +; RV64-NEXT: vid.v v9 +; RV64-NEXT: vrsub.vx v10, v9, a0 +; RV64-NEXT: vrgather.vv v9, v8, v10 +; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv1i32( %a) ret %res } define @reverse_nxv2i32( %a) { -; CHECK-LABEL: reverse_nxv2i32: -; CHECK: # %bb.0: -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: srli a0, a0, 2 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma -; CHECK-NEXT: vid.v v9 -; CHECK-NEXT: vrsub.vx v10, v9, a0 -; CHECK-NEXT: vrgather.vv v9, v8, v10 -; CHECK-NEXT: vmv.v.v v8, v9 -; CHECK-NEXT: ret +; RV32-LABEL: reverse_nxv2i32: +; RV32: # %bb.0: +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: srli a0, a0, 2 +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma +; RV32-NEXT: vid.v v9 +; RV32-NEXT: vrsub.vx v10, v9, a0 +; RV32-NEXT: vrgather.vv v9, v8, v10 +; RV32-NEXT: vmv.v.v v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: reverse_nxv2i32: +; RV64: # %bb.0: +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: srli a0, a0, 2 +; RV64-NEXT: addiw a0, a0, -1 +; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma +; RV64-NEXT: vid.v v9 +; RV64-NEXT: vrsub.vx v10, v9, a0 +; RV64-NEXT: vrgather.vv v9, v8, v10 +; RV64-NEXT: vmv.v.v v8, v9 +; RV64-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv2i32( %a) ret %res } define @reverse_nxv4i32( %a) { -; CHECK-LABEL: reverse_nxv4i32: -; CHECK: # %bb.0: -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: srli a0, a0, 1 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma -; CHECK-NEXT: vid.v v10 -; CHECK-NEXT: vrsub.vx v12, v10, a0 -; CHECK-NEXT: vrgather.vv v10, v8, v12 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret +; RV32-LABEL: reverse_nxv4i32: +; RV32: # %bb.0: +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: srli a0, a0, 1 +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, ma +; RV32-NEXT: vid.v v10 +; RV32-NEXT: vrsub.vx v12, v10, a0 +; RV32-NEXT: 
vrgather.vv v10, v8, v12 +; RV32-NEXT: vmv.v.v v8, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: reverse_nxv4i32: +; RV64: # %bb.0: +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: srli a0, a0, 1 +; RV64-NEXT: addiw a0, a0, -1 +; RV64-NEXT: vsetvli a1, zero, e32, m2, ta, ma +; RV64-NEXT: vid.v v10 +; RV64-NEXT: vrsub.vx v12, v10, a0 +; RV64-NEXT: vrgather.vv v10, v8, v12 +; RV64-NEXT: vmv.v.v v8, v10 +; RV64-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv4i32( %a) ret %res } define @reverse_nxv8i32( %a) { -; CHECK-LABEL: reverse_nxv8i32: -; CHECK: # %bb.0: -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma -; CHECK-NEXT: vid.v v12 -; CHECK-NEXT: vrsub.vx v16, v12, a0 -; CHECK-NEXT: vrgather.vv v12, v8, v16 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret +; RV32-LABEL: reverse_nxv8i32: +; RV32: # %bb.0: +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma +; RV32-NEXT: vid.v v12 +; RV32-NEXT: vrsub.vx v16, v12, a0 +; RV32-NEXT: vrgather.vv v12, v8, v16 +; RV32-NEXT: vmv.v.v v8, v12 +; RV32-NEXT: ret +; +; RV64-LABEL: reverse_nxv8i32: +; RV64: # %bb.0: +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: addiw a0, a0, -1 +; RV64-NEXT: vsetvli a1, zero, e32, m4, ta, ma +; RV64-NEXT: vid.v v12 +; RV64-NEXT: vrsub.vx v16, v12, a0 +; RV64-NEXT: vrgather.vv v12, v8, v16 +; RV64-NEXT: vmv.v.v v8, v12 +; RV64-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv8i32( %a) ret %res } define @reverse_nxv16i32( %a) { -; CHECK-LABEL: reverse_nxv16i32: -; CHECK: # %bb.0: -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 1 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma -; CHECK-NEXT: vid.v v16 -; CHECK-NEXT: vrsub.vx v24, v16, a0 -; CHECK-NEXT: vrgather.vv v16, v8, v24 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret +; RV32-LABEL: reverse_nxv16i32: +; RV32: # %bb.0: +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 1 +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma +; RV32-NEXT: vid.v v16 +; RV32-NEXT: vrsub.vx v24, v16, a0 +; RV32-NEXT: vrgather.vv v16, v8, v24 +; RV32-NEXT: vmv.v.v v8, v16 +; RV32-NEXT: ret +; +; RV64-LABEL: reverse_nxv16i32: +; RV64: # %bb.0: +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, a0, 1 +; RV64-NEXT: addiw a0, a0, -1 +; RV64-NEXT: vsetvli a1, zero, e32, m8, ta, ma +; RV64-NEXT: vid.v v16 +; RV64-NEXT: vrsub.vx v24, v16, a0 +; RV64-NEXT: vrgather.vv v16, v8, v24 +; RV64-NEXT: vmv.v.v v8, v16 +; RV64-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv16i32( %a) ret %res } @@ -1394,175 +1524,305 @@ define @reverse_nxv8i64( %a) { ; define @reverse_nxv1f16( %a) { -; CHECK-LABEL: reverse_nxv1f16: -; CHECK: # %bb.0: -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: srli a0, a0, 3 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma -; CHECK-NEXT: vid.v v9 -; CHECK-NEXT: vrsub.vx v10, v9, a0 -; CHECK-NEXT: vrgather.vv v9, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret +; RV32-LABEL: reverse_nxv1f16: +; RV32: # %bb.0: +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: srli a0, a0, 3 +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: vsetvli a1, zero, e16, mf4, ta, ma +; RV32-NEXT: vid.v v9 +; RV32-NEXT: vrsub.vx v10, v9, a0 +; RV32-NEXT: vrgather.vv v9, v8, v10 +; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: reverse_nxv1f16: +; RV64: # %bb.0: +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: srli a0, a0, 3 +; RV64-NEXT: addiw a0, a0, -1 +; RV64-NEXT: vsetvli 
a1, zero, e16, mf4, ta, ma +; RV64-NEXT: vid.v v9 +; RV64-NEXT: vrsub.vx v10, v9, a0 +; RV64-NEXT: vrgather.vv v9, v8, v10 +; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv1f16( %a) ret %res } define @reverse_nxv2f16( %a) { -; CHECK-LABEL: reverse_nxv2f16: -; CHECK: # %bb.0: -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: srli a0, a0, 2 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma -; CHECK-NEXT: vid.v v9 -; CHECK-NEXT: vrsub.vx v10, v9, a0 -; CHECK-NEXT: vrgather.vv v9, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret +; RV32-LABEL: reverse_nxv2f16: +; RV32: # %bb.0: +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: srli a0, a0, 2 +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; RV32-NEXT: vid.v v9 +; RV32-NEXT: vrsub.vx v10, v9, a0 +; RV32-NEXT: vrgather.vv v9, v8, v10 +; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: reverse_nxv2f16: +; RV64: # %bb.0: +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: srli a0, a0, 2 +; RV64-NEXT: addiw a0, a0, -1 +; RV64-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; RV64-NEXT: vid.v v9 +; RV64-NEXT: vrsub.vx v10, v9, a0 +; RV64-NEXT: vrgather.vv v9, v8, v10 +; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv2f16( %a) ret %res } define @reverse_nxv4f16( %a) { -; CHECK-LABEL: reverse_nxv4f16: -; CHECK: # %bb.0: -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: srli a0, a0, 1 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma -; CHECK-NEXT: vid.v v9 -; CHECK-NEXT: vrsub.vx v10, v9, a0 -; CHECK-NEXT: vrgather.vv v9, v8, v10 -; CHECK-NEXT: vmv.v.v v8, v9 -; CHECK-NEXT: ret +; RV32-LABEL: reverse_nxv4f16: +; RV32: # %bb.0: +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: srli a0, a0, 1 +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; RV32-NEXT: vid.v v9 +; RV32-NEXT: vrsub.vx v10, v9, a0 +; RV32-NEXT: vrgather.vv v9, v8, v10 +; RV32-NEXT: vmv.v.v v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: reverse_nxv4f16: +; RV64: # %bb.0: +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: srli a0, a0, 1 +; RV64-NEXT: addiw a0, a0, -1 +; RV64-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; RV64-NEXT: vid.v v9 +; RV64-NEXT: vrsub.vx v10, v9, a0 +; RV64-NEXT: vrgather.vv v9, v8, v10 +; RV64-NEXT: vmv.v.v v8, v9 +; RV64-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv4f16( %a) ret %res } define @reverse_nxv8f16( %a) { -; CHECK-LABEL: reverse_nxv8f16: -; CHECK: # %bb.0: -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma -; CHECK-NEXT: vid.v v10 -; CHECK-NEXT: vrsub.vx v12, v10, a0 -; CHECK-NEXT: vrgather.vv v10, v8, v12 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret +; RV32-LABEL: reverse_nxv8f16: +; RV32: # %bb.0: +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: vsetvli a1, zero, e16, m2, ta, ma +; RV32-NEXT: vid.v v10 +; RV32-NEXT: vrsub.vx v12, v10, a0 +; RV32-NEXT: vrgather.vv v10, v8, v12 +; RV32-NEXT: vmv.v.v v8, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: reverse_nxv8f16: +; RV64: # %bb.0: +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: addiw a0, a0, -1 +; RV64-NEXT: vsetvli a1, zero, e16, m2, ta, ma +; RV64-NEXT: vid.v v10 +; RV64-NEXT: vrsub.vx v12, v10, a0 +; RV64-NEXT: vrgather.vv v10, v8, v12 +; RV64-NEXT: vmv.v.v v8, v10 +; RV64-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv8f16( %a) ret %res } define @reverse_nxv16f16( %a) { -; CHECK-LABEL: reverse_nxv16f16: -; CHECK: # 
%bb.0: -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 1 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma -; CHECK-NEXT: vid.v v12 -; CHECK-NEXT: vrsub.vx v16, v12, a0 -; CHECK-NEXT: vrgather.vv v12, v8, v16 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret +; RV32-LABEL: reverse_nxv16f16: +; RV32: # %bb.0: +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 1 +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; RV32-NEXT: vid.v v12 +; RV32-NEXT: vrsub.vx v16, v12, a0 +; RV32-NEXT: vrgather.vv v12, v8, v16 +; RV32-NEXT: vmv.v.v v8, v12 +; RV32-NEXT: ret +; +; RV64-LABEL: reverse_nxv16f16: +; RV64: # %bb.0: +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, a0, 1 +; RV64-NEXT: addiw a0, a0, -1 +; RV64-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; RV64-NEXT: vid.v v12 +; RV64-NEXT: vrsub.vx v16, v12, a0 +; RV64-NEXT: vrgather.vv v12, v8, v16 +; RV64-NEXT: vmv.v.v v8, v12 +; RV64-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv16f16( %a) ret %res } define @reverse_nxv32f16( %a) { -; CHECK-LABEL: reverse_nxv32f16: -; CHECK: # %bb.0: -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 2 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma -; CHECK-NEXT: vid.v v16 -; CHECK-NEXT: vrsub.vx v24, v16, a0 -; CHECK-NEXT: vrgather.vv v16, v8, v24 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret +; RV32-LABEL: reverse_nxv32f16: +; RV32: # %bb.0: +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 2 +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: vsetvli a1, zero, e16, m8, ta, ma +; RV32-NEXT: vid.v v16 +; RV32-NEXT: vrsub.vx v24, v16, a0 +; RV32-NEXT: vrgather.vv v16, v8, v24 +; RV32-NEXT: vmv.v.v v8, v16 +; RV32-NEXT: ret +; +; RV64-LABEL: reverse_nxv32f16: +; RV64: # %bb.0: +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, a0, 2 +; RV64-NEXT: addiw a0, a0, -1 +; RV64-NEXT: vsetvli a1, zero, e16, m8, ta, ma +; RV64-NEXT: vid.v v16 +; RV64-NEXT: vrsub.vx v24, v16, a0 +; RV64-NEXT: vrgather.vv v16, v8, v24 +; RV64-NEXT: vmv.v.v v8, v16 +; RV64-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv32f16( %a) ret %res } define @reverse_nxv1f32( %a) { -; CHECK-LABEL: reverse_nxv1f32: -; CHECK: # %bb.0: -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: srli a0, a0, 3 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma -; CHECK-NEXT: vid.v v9 -; CHECK-NEXT: vrsub.vx v10, v9, a0 -; CHECK-NEXT: vrgather.vv v9, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret +; RV32-LABEL: reverse_nxv1f32: +; RV32: # %bb.0: +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: srli a0, a0, 3 +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, ma +; RV32-NEXT: vid.v v9 +; RV32-NEXT: vrsub.vx v10, v9, a0 +; RV32-NEXT: vrgather.vv v9, v8, v10 +; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: reverse_nxv1f32: +; RV64: # %bb.0: +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: srli a0, a0, 3 +; RV64-NEXT: addiw a0, a0, -1 +; RV64-NEXT: vsetvli a1, zero, e32, mf2, ta, ma +; RV64-NEXT: vid.v v9 +; RV64-NEXT: vrsub.vx v10, v9, a0 +; RV64-NEXT: vrgather.vv v9, v8, v10 +; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv1f32( %a) ret %res } define @reverse_nxv2f32( %a) { -; CHECK-LABEL: reverse_nxv2f32: -; CHECK: # %bb.0: -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: srli a0, a0, 2 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma -; CHECK-NEXT: vid.v v9 -; CHECK-NEXT: vrsub.vx v10, v9, a0 -; 
CHECK-NEXT: vrgather.vv v9, v8, v10 -; CHECK-NEXT: vmv.v.v v8, v9 -; CHECK-NEXT: ret +; RV32-LABEL: reverse_nxv2f32: +; RV32: # %bb.0: +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: srli a0, a0, 2 +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma +; RV32-NEXT: vid.v v9 +; RV32-NEXT: vrsub.vx v10, v9, a0 +; RV32-NEXT: vrgather.vv v9, v8, v10 +; RV32-NEXT: vmv.v.v v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: reverse_nxv2f32: +; RV64: # %bb.0: +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: srli a0, a0, 2 +; RV64-NEXT: addiw a0, a0, -1 +; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma +; RV64-NEXT: vid.v v9 +; RV64-NEXT: vrsub.vx v10, v9, a0 +; RV64-NEXT: vrgather.vv v9, v8, v10 +; RV64-NEXT: vmv.v.v v8, v9 +; RV64-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv2f32( %a) ret %res } define @reverse_nxv4f32( %a) { -; CHECK-LABEL: reverse_nxv4f32: -; CHECK: # %bb.0: -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: srli a0, a0, 1 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma -; CHECK-NEXT: vid.v v10 -; CHECK-NEXT: vrsub.vx v12, v10, a0 -; CHECK-NEXT: vrgather.vv v10, v8, v12 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret +; RV32-LABEL: reverse_nxv4f32: +; RV32: # %bb.0: +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: srli a0, a0, 1 +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, ma +; RV32-NEXT: vid.v v10 +; RV32-NEXT: vrsub.vx v12, v10, a0 +; RV32-NEXT: vrgather.vv v10, v8, v12 +; RV32-NEXT: vmv.v.v v8, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: reverse_nxv4f32: +; RV64: # %bb.0: +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: srli a0, a0, 1 +; RV64-NEXT: addiw a0, a0, -1 +; RV64-NEXT: vsetvli a1, zero, e32, m2, ta, ma +; RV64-NEXT: vid.v v10 +; RV64-NEXT: vrsub.vx v12, v10, a0 +; RV64-NEXT: vrgather.vv v10, v8, v12 +; RV64-NEXT: vmv.v.v v8, v10 +; RV64-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv4f32( %a) ret %res } define @reverse_nxv8f32( %a) { -; CHECK-LABEL: reverse_nxv8f32: -; CHECK: # %bb.0: -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma -; CHECK-NEXT: vid.v v12 -; CHECK-NEXT: vrsub.vx v16, v12, a0 -; CHECK-NEXT: vrgather.vv v12, v8, v16 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret +; RV32-LABEL: reverse_nxv8f32: +; RV32: # %bb.0: +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma +; RV32-NEXT: vid.v v12 +; RV32-NEXT: vrsub.vx v16, v12, a0 +; RV32-NEXT: vrgather.vv v12, v8, v16 +; RV32-NEXT: vmv.v.v v8, v12 +; RV32-NEXT: ret +; +; RV64-LABEL: reverse_nxv8f32: +; RV64: # %bb.0: +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: addiw a0, a0, -1 +; RV64-NEXT: vsetvli a1, zero, e32, m4, ta, ma +; RV64-NEXT: vid.v v12 +; RV64-NEXT: vrsub.vx v16, v12, a0 +; RV64-NEXT: vrgather.vv v12, v8, v16 +; RV64-NEXT: vmv.v.v v8, v12 +; RV64-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv8f32( %a) ret %res } define @reverse_nxv16f32( %a) { -; CHECK-LABEL: reverse_nxv16f32: -; CHECK: # %bb.0: -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 1 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma -; CHECK-NEXT: vid.v v16 -; CHECK-NEXT: vrsub.vx v24, v16, a0 -; CHECK-NEXT: vrgather.vv v16, v8, v24 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret +; RV32-LABEL: reverse_nxv16f32: +; RV32: # %bb.0: +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 1 +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma +; RV32-NEXT: vid.v v16 +; 
RV32-NEXT: vrsub.vx v24, v16, a0 +; RV32-NEXT: vrgather.vv v16, v8, v24 +; RV32-NEXT: vmv.v.v v8, v16 +; RV32-NEXT: ret +; +; RV64-LABEL: reverse_nxv16f32: +; RV64: # %bb.0: +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, a0, 1 +; RV64-NEXT: addiw a0, a0, -1 +; RV64-NEXT: vsetvli a1, zero, e32, m8, ta, ma +; RV64-NEXT: vid.v v16 +; RV64-NEXT: vrsub.vx v24, v16, a0 +; RV64-NEXT: vrgather.vv v16, v8, v24 +; RV64-NEXT: vmv.v.v v8, v16 +; RV64-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv16f32( %a) ret %res } @@ -1668,221 +1928,77 @@ define @reverse_nxv6i64( %a) { } define @reverse_nxv12i64( %a) { -; RV32-BITS-UNKNOWN-LABEL: reverse_nxv12i64: -; RV32-BITS-UNKNOWN: # %bb.0: -; RV32-BITS-UNKNOWN-NEXT: addi sp, sp, -80 -; RV32-BITS-UNKNOWN-NEXT: .cfi_def_cfa_offset 80 -; RV32-BITS-UNKNOWN-NEXT: sw ra, 76(sp) # 4-byte Folded Spill -; RV32-BITS-UNKNOWN-NEXT: sw s0, 72(sp) # 4-byte Folded Spill -; RV32-BITS-UNKNOWN-NEXT: .cfi_offset ra, -4 -; RV32-BITS-UNKNOWN-NEXT: .cfi_offset s0, -8 -; RV32-BITS-UNKNOWN-NEXT: addi s0, sp, 80 -; RV32-BITS-UNKNOWN-NEXT: .cfi_def_cfa s0, 0 -; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb -; RV32-BITS-UNKNOWN-NEXT: slli a0, a0, 4 -; RV32-BITS-UNKNOWN-NEXT: sub sp, sp, a0 -; RV32-BITS-UNKNOWN-NEXT: andi sp, sp, -64 -; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb -; RV32-BITS-UNKNOWN-NEXT: addi a1, a0, -1 -; RV32-BITS-UNKNOWN-NEXT: vsetvli a2, zero, e64, m8, ta, ma -; RV32-BITS-UNKNOWN-NEXT: vid.v v24 -; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v24, v24, a1 -; RV32-BITS-UNKNOWN-NEXT: vrgather.vv v0, v16, v24 -; RV32-BITS-UNKNOWN-NEXT: vmv4r.v v16, v4 -; RV32-BITS-UNKNOWN-NEXT: vrgather.vv v0, v8, v24 -; RV32-BITS-UNKNOWN-NEXT: vmv4r.v v20, v0 -; RV32-BITS-UNKNOWN-NEXT: slli a0, a0, 3 -; RV32-BITS-UNKNOWN-NEXT: addi a1, sp, 64 -; RV32-BITS-UNKNOWN-NEXT: add a0, a1, a0 -; RV32-BITS-UNKNOWN-NEXT: vs4r.v v4, (a0) -; RV32-BITS-UNKNOWN-NEXT: vs8r.v v16, (a1) -; RV32-BITS-UNKNOWN-NEXT: vl8re64.v v16, (a0) -; RV32-BITS-UNKNOWN-NEXT: vl8re64.v v8, (a1) -; RV32-BITS-UNKNOWN-NEXT: addi sp, s0, -80 -; RV32-BITS-UNKNOWN-NEXT: lw ra, 76(sp) # 4-byte Folded Reload -; RV32-BITS-UNKNOWN-NEXT: lw s0, 72(sp) # 4-byte Folded Reload -; RV32-BITS-UNKNOWN-NEXT: addi sp, sp, 80 -; RV32-BITS-UNKNOWN-NEXT: ret -; -; RV32-BITS-256-LABEL: reverse_nxv12i64: -; RV32-BITS-256: # %bb.0: -; RV32-BITS-256-NEXT: addi sp, sp, -80 -; RV32-BITS-256-NEXT: .cfi_def_cfa_offset 80 -; RV32-BITS-256-NEXT: sw ra, 76(sp) # 4-byte Folded Spill -; RV32-BITS-256-NEXT: sw s0, 72(sp) # 4-byte Folded Spill -; RV32-BITS-256-NEXT: .cfi_offset ra, -4 -; RV32-BITS-256-NEXT: .cfi_offset s0, -8 -; RV32-BITS-256-NEXT: addi s0, sp, 80 -; RV32-BITS-256-NEXT: .cfi_def_cfa s0, 0 -; RV32-BITS-256-NEXT: csrr a0, vlenb -; RV32-BITS-256-NEXT: slli a0, a0, 4 -; RV32-BITS-256-NEXT: sub sp, sp, a0 -; RV32-BITS-256-NEXT: andi sp, sp, -64 -; RV32-BITS-256-NEXT: csrr a0, vlenb -; RV32-BITS-256-NEXT: addi a1, a0, -1 -; RV32-BITS-256-NEXT: vsetvli a2, zero, e64, m8, ta, ma -; RV32-BITS-256-NEXT: vid.v v24 -; RV32-BITS-256-NEXT: vrsub.vx v24, v24, a1 -; RV32-BITS-256-NEXT: vrgather.vv v0, v16, v24 -; RV32-BITS-256-NEXT: vmv4r.v v16, v4 -; RV32-BITS-256-NEXT: vrgather.vv v0, v8, v24 -; RV32-BITS-256-NEXT: vmv4r.v v20, v0 -; RV32-BITS-256-NEXT: slli a0, a0, 3 -; RV32-BITS-256-NEXT: addi a1, sp, 64 -; RV32-BITS-256-NEXT: add a0, a1, a0 -; RV32-BITS-256-NEXT: vs4r.v v4, (a0) -; RV32-BITS-256-NEXT: vs8r.v v16, (a1) -; RV32-BITS-256-NEXT: vl8re64.v v16, (a0) -; RV32-BITS-256-NEXT: vl8re64.v v8, (a1) -; RV32-BITS-256-NEXT: addi sp, s0, -80 -; 
RV32-BITS-256-NEXT: lw ra, 76(sp) # 4-byte Folded Reload -; RV32-BITS-256-NEXT: lw s0, 72(sp) # 4-byte Folded Reload -; RV32-BITS-256-NEXT: addi sp, sp, 80 -; RV32-BITS-256-NEXT: ret -; -; RV32-BITS-512-LABEL: reverse_nxv12i64: -; RV32-BITS-512: # %bb.0: -; RV32-BITS-512-NEXT: addi sp, sp, -80 -; RV32-BITS-512-NEXT: .cfi_def_cfa_offset 80 -; RV32-BITS-512-NEXT: sw ra, 76(sp) # 4-byte Folded Spill -; RV32-BITS-512-NEXT: sw s0, 72(sp) # 4-byte Folded Spill -; RV32-BITS-512-NEXT: .cfi_offset ra, -4 -; RV32-BITS-512-NEXT: .cfi_offset s0, -8 -; RV32-BITS-512-NEXT: addi s0, sp, 80 -; RV32-BITS-512-NEXT: .cfi_def_cfa s0, 0 -; RV32-BITS-512-NEXT: csrr a0, vlenb -; RV32-BITS-512-NEXT: slli a0, a0, 4 -; RV32-BITS-512-NEXT: sub sp, sp, a0 -; RV32-BITS-512-NEXT: andi sp, sp, -64 -; RV32-BITS-512-NEXT: csrr a0, vlenb -; RV32-BITS-512-NEXT: addi a1, a0, -1 -; RV32-BITS-512-NEXT: vsetvli a2, zero, e64, m8, ta, ma -; RV32-BITS-512-NEXT: vid.v v24 -; RV32-BITS-512-NEXT: vrsub.vx v24, v24, a1 -; RV32-BITS-512-NEXT: vrgather.vv v0, v16, v24 -; RV32-BITS-512-NEXT: vmv4r.v v16, v4 -; RV32-BITS-512-NEXT: vrgather.vv v0, v8, v24 -; RV32-BITS-512-NEXT: vmv4r.v v20, v0 -; RV32-BITS-512-NEXT: slli a0, a0, 3 -; RV32-BITS-512-NEXT: addi a1, sp, 64 -; RV32-BITS-512-NEXT: add a0, a1, a0 -; RV32-BITS-512-NEXT: vs4r.v v4, (a0) -; RV32-BITS-512-NEXT: vs8r.v v16, (a1) -; RV32-BITS-512-NEXT: vl8re64.v v16, (a0) -; RV32-BITS-512-NEXT: vl8re64.v v8, (a1) -; RV32-BITS-512-NEXT: addi sp, s0, -80 -; RV32-BITS-512-NEXT: lw ra, 76(sp) # 4-byte Folded Reload -; RV32-BITS-512-NEXT: lw s0, 72(sp) # 4-byte Folded Reload -; RV32-BITS-512-NEXT: addi sp, sp, 80 -; RV32-BITS-512-NEXT: ret -; -; RV64-BITS-UNKNOWN-LABEL: reverse_nxv12i64: -; RV64-BITS-UNKNOWN: # %bb.0: -; RV64-BITS-UNKNOWN-NEXT: addi sp, sp, -80 -; RV64-BITS-UNKNOWN-NEXT: .cfi_def_cfa_offset 80 -; RV64-BITS-UNKNOWN-NEXT: sd ra, 72(sp) # 8-byte Folded Spill -; RV64-BITS-UNKNOWN-NEXT: sd s0, 64(sp) # 8-byte Folded Spill -; RV64-BITS-UNKNOWN-NEXT: .cfi_offset ra, -8 -; RV64-BITS-UNKNOWN-NEXT: .cfi_offset s0, -16 -; RV64-BITS-UNKNOWN-NEXT: addi s0, sp, 80 -; RV64-BITS-UNKNOWN-NEXT: .cfi_def_cfa s0, 0 -; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb -; RV64-BITS-UNKNOWN-NEXT: slli a0, a0, 4 -; RV64-BITS-UNKNOWN-NEXT: sub sp, sp, a0 -; RV64-BITS-UNKNOWN-NEXT: andi sp, sp, -64 -; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb -; RV64-BITS-UNKNOWN-NEXT: addi a1, a0, -1 -; RV64-BITS-UNKNOWN-NEXT: vsetvli a2, zero, e64, m8, ta, ma -; RV64-BITS-UNKNOWN-NEXT: vid.v v24 -; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v24, v24, a1 -; RV64-BITS-UNKNOWN-NEXT: vrgather.vv v0, v16, v24 -; RV64-BITS-UNKNOWN-NEXT: vmv4r.v v16, v4 -; RV64-BITS-UNKNOWN-NEXT: vrgather.vv v0, v8, v24 -; RV64-BITS-UNKNOWN-NEXT: vmv4r.v v20, v0 -; RV64-BITS-UNKNOWN-NEXT: slli a0, a0, 3 -; RV64-BITS-UNKNOWN-NEXT: addi a1, sp, 64 -; RV64-BITS-UNKNOWN-NEXT: add a0, a1, a0 -; RV64-BITS-UNKNOWN-NEXT: vs4r.v v4, (a0) -; RV64-BITS-UNKNOWN-NEXT: vs8r.v v16, (a1) -; RV64-BITS-UNKNOWN-NEXT: vl8re64.v v16, (a0) -; RV64-BITS-UNKNOWN-NEXT: vl8re64.v v8, (a1) -; RV64-BITS-UNKNOWN-NEXT: addi sp, s0, -80 -; RV64-BITS-UNKNOWN-NEXT: ld ra, 72(sp) # 8-byte Folded Reload -; RV64-BITS-UNKNOWN-NEXT: ld s0, 64(sp) # 8-byte Folded Reload -; RV64-BITS-UNKNOWN-NEXT: addi sp, sp, 80 -; RV64-BITS-UNKNOWN-NEXT: ret -; -; RV64-BITS-256-LABEL: reverse_nxv12i64: -; RV64-BITS-256: # %bb.0: -; RV64-BITS-256-NEXT: addi sp, sp, -80 -; RV64-BITS-256-NEXT: .cfi_def_cfa_offset 80 -; RV64-BITS-256-NEXT: sd ra, 72(sp) # 8-byte Folded Spill -; RV64-BITS-256-NEXT: sd s0, 64(sp) # 
8-byte Folded Spill -; RV64-BITS-256-NEXT: .cfi_offset ra, -8 -; RV64-BITS-256-NEXT: .cfi_offset s0, -16 -; RV64-BITS-256-NEXT: addi s0, sp, 80 -; RV64-BITS-256-NEXT: .cfi_def_cfa s0, 0 -; RV64-BITS-256-NEXT: csrr a0, vlenb -; RV64-BITS-256-NEXT: slli a0, a0, 4 -; RV64-BITS-256-NEXT: sub sp, sp, a0 -; RV64-BITS-256-NEXT: andi sp, sp, -64 -; RV64-BITS-256-NEXT: csrr a0, vlenb -; RV64-BITS-256-NEXT: addi a1, a0, -1 -; RV64-BITS-256-NEXT: vsetvli a2, zero, e64, m8, ta, ma -; RV64-BITS-256-NEXT: vid.v v24 -; RV64-BITS-256-NEXT: vrsub.vx v24, v24, a1 -; RV64-BITS-256-NEXT: vrgather.vv v0, v16, v24 -; RV64-BITS-256-NEXT: vmv4r.v v16, v4 -; RV64-BITS-256-NEXT: vrgather.vv v0, v8, v24 -; RV64-BITS-256-NEXT: vmv4r.v v20, v0 -; RV64-BITS-256-NEXT: slli a0, a0, 3 -; RV64-BITS-256-NEXT: addi a1, sp, 64 -; RV64-BITS-256-NEXT: add a0, a1, a0 -; RV64-BITS-256-NEXT: vs4r.v v4, (a0) -; RV64-BITS-256-NEXT: vs8r.v v16, (a1) -; RV64-BITS-256-NEXT: vl8re64.v v16, (a0) -; RV64-BITS-256-NEXT: vl8re64.v v8, (a1) -; RV64-BITS-256-NEXT: addi sp, s0, -80 -; RV64-BITS-256-NEXT: ld ra, 72(sp) # 8-byte Folded Reload -; RV64-BITS-256-NEXT: ld s0, 64(sp) # 8-byte Folded Reload -; RV64-BITS-256-NEXT: addi sp, sp, 80 -; RV64-BITS-256-NEXT: ret -; -; RV64-BITS-512-LABEL: reverse_nxv12i64: -; RV64-BITS-512: # %bb.0: -; RV64-BITS-512-NEXT: addi sp, sp, -80 -; RV64-BITS-512-NEXT: .cfi_def_cfa_offset 80 -; RV64-BITS-512-NEXT: sd ra, 72(sp) # 8-byte Folded Spill -; RV64-BITS-512-NEXT: sd s0, 64(sp) # 8-byte Folded Spill -; RV64-BITS-512-NEXT: .cfi_offset ra, -8 -; RV64-BITS-512-NEXT: .cfi_offset s0, -16 -; RV64-BITS-512-NEXT: addi s0, sp, 80 -; RV64-BITS-512-NEXT: .cfi_def_cfa s0, 0 -; RV64-BITS-512-NEXT: csrr a0, vlenb -; RV64-BITS-512-NEXT: slli a0, a0, 4 -; RV64-BITS-512-NEXT: sub sp, sp, a0 -; RV64-BITS-512-NEXT: andi sp, sp, -64 -; RV64-BITS-512-NEXT: csrr a0, vlenb -; RV64-BITS-512-NEXT: addi a1, a0, -1 -; RV64-BITS-512-NEXT: vsetvli a2, zero, e64, m8, ta, ma -; RV64-BITS-512-NEXT: vid.v v24 -; RV64-BITS-512-NEXT: vrsub.vx v24, v24, a1 -; RV64-BITS-512-NEXT: vrgather.vv v0, v16, v24 -; RV64-BITS-512-NEXT: vmv4r.v v16, v4 -; RV64-BITS-512-NEXT: vrgather.vv v0, v8, v24 -; RV64-BITS-512-NEXT: vmv4r.v v20, v0 -; RV64-BITS-512-NEXT: slli a0, a0, 3 -; RV64-BITS-512-NEXT: addi a1, sp, 64 -; RV64-BITS-512-NEXT: add a0, a1, a0 -; RV64-BITS-512-NEXT: vs4r.v v4, (a0) -; RV64-BITS-512-NEXT: vs8r.v v16, (a1) -; RV64-BITS-512-NEXT: vl8re64.v v16, (a0) -; RV64-BITS-512-NEXT: vl8re64.v v8, (a1) -; RV64-BITS-512-NEXT: addi sp, s0, -80 -; RV64-BITS-512-NEXT: ld ra, 72(sp) # 8-byte Folded Reload -; RV64-BITS-512-NEXT: ld s0, 64(sp) # 8-byte Folded Reload -; RV64-BITS-512-NEXT: addi sp, sp, 80 -; RV64-BITS-512-NEXT: ret +; RV32-LABEL: reverse_nxv12i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -80 +; RV32-NEXT: .cfi_def_cfa_offset 80 +; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill +; RV32-NEXT: .cfi_offset ra, -4 +; RV32-NEXT: .cfi_offset s0, -8 +; RV32-NEXT: addi s0, sp, 80 +; RV32-NEXT: .cfi_def_cfa s0, 0 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 4 +; RV32-NEXT: sub sp, sp, a0 +; RV32-NEXT: andi sp, sp, -64 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: addi a1, a0, -1 +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vid.v v24 +; RV32-NEXT: vrsub.vx v24, v24, a1 +; RV32-NEXT: vrgather.vv v0, v16, v24 +; RV32-NEXT: vmv4r.v v16, v4 +; RV32-NEXT: vrgather.vv v0, v8, v24 +; RV32-NEXT: vmv4r.v v20, v0 +; RV32-NEXT: slli a0, a0, 3 +; RV32-NEXT: addi a1, sp, 64 +; 
RV32-NEXT: add a0, a1, a0 +; RV32-NEXT: vs4r.v v4, (a0) +; RV32-NEXT: vs8r.v v16, (a1) +; RV32-NEXT: vl8re64.v v16, (a0) +; RV32-NEXT: vl8re64.v v8, (a1) +; RV32-NEXT: addi sp, s0, -80 +; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload +; RV32-NEXT: addi sp, sp, 80 +; RV32-NEXT: ret +; +; RV64-LABEL: reverse_nxv12i64: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -80 +; RV64-NEXT: .cfi_def_cfa_offset 80 +; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill +; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill +; RV64-NEXT: .cfi_offset ra, -8 +; RV64-NEXT: .cfi_offset s0, -16 +; RV64-NEXT: addi s0, sp, 80 +; RV64-NEXT: .cfi_def_cfa s0, 0 +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, a0, 4 +; RV64-NEXT: sub sp, sp, a0 +; RV64-NEXT: andi sp, sp, -64 +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: addi a1, a0, -1 +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV64-NEXT: vid.v v24 +; RV64-NEXT: vrsub.vx v24, v24, a1 +; RV64-NEXT: vrgather.vv v0, v16, v24 +; RV64-NEXT: vmv4r.v v16, v4 +; RV64-NEXT: vrgather.vv v0, v8, v24 +; RV64-NEXT: vmv4r.v v20, v0 +; RV64-NEXT: slli a0, a0, 3 +; RV64-NEXT: addi a1, sp, 64 +; RV64-NEXT: add a0, a1, a0 +; RV64-NEXT: vs4r.v v4, (a0) +; RV64-NEXT: vs8r.v v16, (a1) +; RV64-NEXT: vl8re64.v v16, (a0) +; RV64-NEXT: vl8re64.v v8, (a1) +; RV64-NEXT: addi sp, s0, -80 +; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload +; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload +; RV64-NEXT: addi sp, sp, 80 +; RV64-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv12i64( %a) ret %res } diff --git a/llvm/test/CodeGen/RISCV/rvv/sshl_sat_vec.ll b/llvm/test/CodeGen/RISCV/rvv/sshl_sat_vec.ll index 443fe93a618c5..56d98981947c3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sshl_sat_vec.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sshl_sat_vec.ll @@ -32,13 +32,11 @@ define <4 x i32> @vec_v4i32(<4 x i32> %x, <4 x i32> %y) nounwind { ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, -1 ; CHECK-NEXT: lui a0, 524288 -; CHECK-NEXT: addiw a0, a0, -1 +; CHECK-NEXT: addiw a1, a0, -1 ; CHECK-NEXT: vsll.vv v10, v8, v9 ; CHECK-NEXT: vsra.vv v9, v10, v9 ; CHECK-NEXT: vmsne.vv v8, v8, v9 -; CHECK-NEXT: vmv.v.x v9, a0 -; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: slli a0, a0, 31 +; CHECK-NEXT: vmv.v.x v9, a1 ; CHECK-NEXT: vmerge.vxm v9, v9, a0, v0 ; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmerge.vvm v8, v10, v9, v0 @@ -116,13 +114,11 @@ define @vec_nxv4i32( %x, ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, -1 ; CHECK-NEXT: lui a0, 524288 -; CHECK-NEXT: addiw a0, a0, -1 +; CHECK-NEXT: addiw a1, a0, -1 ; CHECK-NEXT: vsll.vv v12, v8, v10 ; CHECK-NEXT: vsra.vv v14, v12, v10 ; CHECK-NEXT: vmsne.vv v10, v8, v14 -; CHECK-NEXT: vmv.v.x v8, a0 -; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: slli a0, a0, 31 +; CHECK-NEXT: vmv.v.x v8, a1 ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll index c674670d8d030..5be8530190b2f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll @@ -5,61 +5,32 @@ ; Integers define {<16 x i1>, <16 x i1>} @vector_deinterleave_v16i1_v32i1(<32 x i1> %vec) { -; RV32-LABEL: vector_deinterleave_v16i1_v32i1: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma -; RV32-NEXT: vmv.v.i v8, 0 -; RV32-NEXT: vmerge.vim v10, v8, 1, v0 
-; RV32-NEXT: vid.v v9 -; RV32-NEXT: vadd.vv v11, v9, v9 -; RV32-NEXT: vrgather.vv v9, v10, v11 -; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; RV32-NEXT: vslidedown.vi v0, v0, 2 -; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma -; RV32-NEXT: vmerge.vim v8, v8, 1, v0 -; RV32-NEXT: vadd.vi v12, v11, -16 -; RV32-NEXT: lui a0, 16 -; RV32-NEXT: addi a0, a0, -256 -; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, ma -; RV32-NEXT: vmv.s.x v0, a0 -; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu -; RV32-NEXT: vrgather.vv v9, v8, v12, v0.t -; RV32-NEXT: vmsne.vi v9, v9, 0 -; RV32-NEXT: vadd.vi v12, v11, 1 -; RV32-NEXT: vrgather.vv v13, v10, v12 -; RV32-NEXT: vadd.vi v10, v11, -15 -; RV32-NEXT: vrgather.vv v13, v8, v10, v0.t -; RV32-NEXT: vmsne.vi v8, v13, 0 -; RV32-NEXT: vmv.v.v v0, v9 -; RV32-NEXT: ret -; -; RV64-LABEL: vector_deinterleave_v16i1_v32i1: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma -; RV64-NEXT: vmv.v.i v8, 0 -; RV64-NEXT: vmerge.vim v10, v8, 1, v0 -; RV64-NEXT: vid.v v9 -; RV64-NEXT: vadd.vv v11, v9, v9 -; RV64-NEXT: vrgather.vv v9, v10, v11 -; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; RV64-NEXT: vslidedown.vi v0, v0, 2 -; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma -; RV64-NEXT: vmerge.vim v8, v8, 1, v0 -; RV64-NEXT: vadd.vi v12, v11, -16 -; RV64-NEXT: lui a0, 16 -; RV64-NEXT: addiw a0, a0, -256 -; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, ma -; RV64-NEXT: vmv.s.x v0, a0 -; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu -; RV64-NEXT: vrgather.vv v9, v8, v12, v0.t -; RV64-NEXT: vmsne.vi v9, v9, 0 -; RV64-NEXT: vadd.vi v12, v11, 1 -; RV64-NEXT: vrgather.vv v13, v10, v12 -; RV64-NEXT: vadd.vi v10, v11, -15 -; RV64-NEXT: vrgather.vv v13, v8, v10, v0.t -; RV64-NEXT: vmsne.vi v8, v13, 0 -; RV64-NEXT: vmv.v.v v0, v9 -; RV64-NEXT: ret +; CHECK-LABEL: vector_deinterleave_v16i1_v32i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma +; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: vmerge.vim v10, v8, 1, v0 +; CHECK-NEXT: vid.v v9 +; CHECK-NEXT: vadd.vv v11, v9, v9 +; CHECK-NEXT: vrgather.vv v9, v10, v11 +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; CHECK-NEXT: vslidedown.vi v0, v0, 2 +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma +; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: vadd.vi v12, v11, -16 +; CHECK-NEXT: li a0, -256 +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma +; CHECK-NEXT: vmv.s.x v0, a0 +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vrgather.vv v9, v8, v12, v0.t +; CHECK-NEXT: vmsne.vi v9, v9, 0 +; CHECK-NEXT: vadd.vi v12, v11, 1 +; CHECK-NEXT: vrgather.vv v13, v10, v12 +; CHECK-NEXT: vadd.vi v10, v11, -15 +; CHECK-NEXT: vrgather.vv v13, v8, v10, v0.t +; CHECK-NEXT: vmsne.vi v8, v13, 0 +; CHECK-NEXT: vmv.v.v v0, v9 +; CHECK-NEXT: ret %retval = call {<16 x i1>, <16 x i1>} @llvm.experimental.vector.deinterleave2.v32i1(<32 x i1> %vec) ret {<16 x i1>, <16 x i1>} %retval } @@ -213,3 +184,6 @@ declare {<2 x float>, <2 x float>} @llvm.experimental.vector.deinterleave2.v4f32 declare {<8 x half>, <8 x half>} @llvm.experimental.vector.deinterleave2.v16f16(<16 x half>) declare {<4 x float>, <4 x float>} @llvm.experimental.vector.deinterleave2.v8f32(<8 x float>) declare {<2 x double>, <2 x double>} @llvm.experimental.vector.deinterleave2.v4f64(<4 x double>) +;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: +; RV32: {{.*}} +; RV64: {{.*}} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulh-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmulh-sdnode.ll index 06f5d39622da8..69191ebb9256d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmulh-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulh-sdnode.ll @@ -60,21 +60,12 @@ define @vmulh_vx_nxv1i32( %va, i32 %x) { } define @vmulh_vi_nxv1i32_0( %va) { -; RV32-LABEL: vmulh_vi_nxv1i32_0: -; RV32: # %bb.0: -; RV32-NEXT: li a0, -7 -; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, ma -; RV32-NEXT: vmulh.vx v8, v8, a0 -; RV32-NEXT: ret -; -; RV64-LABEL: vmulh_vi_nxv1i32_0: -; RV64: # %bb.0: -; RV64-NEXT: li a0, 1 -; RV64-NEXT: slli a0, a0, 32 -; RV64-NEXT: addi a0, a0, -7 -; RV64-NEXT: vsetvli a1, zero, e32, mf2, ta, ma -; RV64-NEXT: vmulh.vx v8, v8, a0 -; RV64-NEXT: ret +; CHECK-LABEL: vmulh_vi_nxv1i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: li a0, -7 +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma +; CHECK-NEXT: vmulh.vx v8, v8, a0 +; CHECK-NEXT: ret %head1 = insertelement poison, i32 -7, i32 0 %splat1 = shufflevector %head1, poison, zeroinitializer %vb = sext %splat1 to @@ -141,21 +132,12 @@ define @vmulh_vx_nxv2i32( %va, i32 %x) { } define @vmulh_vi_nxv2i32_0( %va) { -; RV32-LABEL: vmulh_vi_nxv2i32_0: -; RV32: # %bb.0: -; RV32-NEXT: li a0, -7 -; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma -; RV32-NEXT: vmulh.vx v8, v8, a0 -; RV32-NEXT: ret -; -; RV64-LABEL: vmulh_vi_nxv2i32_0: -; RV64: # %bb.0: -; RV64-NEXT: li a0, 1 -; RV64-NEXT: slli a0, a0, 32 -; RV64-NEXT: addi a0, a0, -7 -; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma -; RV64-NEXT: vmulh.vx v8, v8, a0 -; RV64-NEXT: ret +; CHECK-LABEL: vmulh_vi_nxv2i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: li a0, -7 +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma +; CHECK-NEXT: vmulh.vx v8, v8, a0 +; CHECK-NEXT: ret %head1 = insertelement poison, i32 -7, i32 0 %splat1 = shufflevector %head1, poison, zeroinitializer %vb = sext %splat1 to @@ -222,21 +204,12 @@ define @vmulh_vx_nxv4i32( %va, i32 %x) { } define @vmulh_vi_nxv4i32_0( %va) { -; RV32-LABEL: vmulh_vi_nxv4i32_0: -; RV32: # %bb.0: -; RV32-NEXT: li a0, -7 -; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, ma -; RV32-NEXT: vmulh.vx v8, v8, a0 -; RV32-NEXT: ret -; -; RV64-LABEL: vmulh_vi_nxv4i32_0: -; RV64: # %bb.0: -; RV64-NEXT: li a0, 1 -; RV64-NEXT: slli a0, a0, 32 -; RV64-NEXT: addi a0, a0, -7 -; RV64-NEXT: vsetvli a1, zero, e32, m2, ta, ma -; RV64-NEXT: vmulh.vx v8, v8, a0 -; RV64-NEXT: ret +; CHECK-LABEL: vmulh_vi_nxv4i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: li a0, -7 +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma +; CHECK-NEXT: vmulh.vx v8, v8, a0 +; CHECK-NEXT: ret %head1 = insertelement poison, i32 -7, i32 0 %splat1 = shufflevector %head1, poison, zeroinitializer %vb = sext %splat1 to @@ -303,21 +276,12 @@ define @vmulh_vx_nxv8i32( %va, i32 %x) { } define @vmulh_vi_nxv8i32_0( %va) { -; RV32-LABEL: vmulh_vi_nxv8i32_0: -; RV32: # %bb.0: -; RV32-NEXT: li a0, -7 -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma -; RV32-NEXT: vmulh.vx v8, v8, a0 -; RV32-NEXT: ret -; -; RV64-LABEL: vmulh_vi_nxv8i32_0: -; RV64: # %bb.0: -; RV64-NEXT: li a0, 1 -; RV64-NEXT: slli a0, a0, 32 -; RV64-NEXT: addi a0, a0, -7 -; RV64-NEXT: vsetvli a1, zero, e32, m4, ta, ma -; RV64-NEXT: vmulh.vx v8, v8, a0 -; RV64-NEXT: ret +; CHECK-LABEL: vmulh_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: li a0, -7 +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma +; CHECK-NEXT: vmulh.vx v8, v8, a0 +; CHECK-NEXT: ret %head1 = insertelement poison, i32 -7, i32 0 %splat1 = shufflevector %head1, 
poison, zeroinitializer %vb = sext %splat1 to @@ -348,3 +312,6 @@ define @vmulh_vi_nxv8i32_1( %va) { %vf = trunc %ve to ret %vf } +;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: +; RV32: {{.*}} +; RV64: {{.*}} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhu-sdnode.ll index 186d56b1293db..5354c17fd2a7d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmulhu-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulhu-sdnode.ll @@ -37,21 +37,12 @@ define @vmulhu_vx_nxv1i32( %va, i32 %x) { } define @vmulhu_vi_nxv1i32_0( %va) { -; RV32-LABEL: vmulhu_vi_nxv1i32_0: -; RV32: # %bb.0: -; RV32-NEXT: li a0, -7 -; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, ma -; RV32-NEXT: vmulhu.vx v8, v8, a0 -; RV32-NEXT: ret -; -; RV64-LABEL: vmulhu_vi_nxv1i32_0: -; RV64: # %bb.0: -; RV64-NEXT: li a0, 1 -; RV64-NEXT: slli a0, a0, 32 -; RV64-NEXT: addi a0, a0, -7 -; RV64-NEXT: vsetvli a1, zero, e32, mf2, ta, ma -; RV64-NEXT: vmulhu.vx v8, v8, a0 -; RV64-NEXT: ret +; CHECK-LABEL: vmulhu_vi_nxv1i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: li a0, -7 +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma +; CHECK-NEXT: vmulhu.vx v8, v8, a0 +; CHECK-NEXT: ret %head1 = insertelement poison, i32 -7, i32 0 %splat1 = shufflevector %head1, poison, zeroinitializer %vb = zext %splat1 to @@ -124,21 +115,12 @@ define @vmulhu_vx_nxv2i32( %va, i32 %x) { } define @vmulhu_vi_nxv2i32_0( %va) { -; RV32-LABEL: vmulhu_vi_nxv2i32_0: -; RV32: # %bb.0: -; RV32-NEXT: li a0, -7 -; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma -; RV32-NEXT: vmulhu.vx v8, v8, a0 -; RV32-NEXT: ret -; -; RV64-LABEL: vmulhu_vi_nxv2i32_0: -; RV64: # %bb.0: -; RV64-NEXT: li a0, 1 -; RV64-NEXT: slli a0, a0, 32 -; RV64-NEXT: addi a0, a0, -7 -; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma -; RV64-NEXT: vmulhu.vx v8, v8, a0 -; RV64-NEXT: ret +; CHECK-LABEL: vmulhu_vi_nxv2i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: li a0, -7 +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma +; CHECK-NEXT: vmulhu.vx v8, v8, a0 +; CHECK-NEXT: ret %head1 = insertelement poison, i32 -7, i32 0 %splat1 = shufflevector %head1, poison, zeroinitializer %vb = zext %splat1 to @@ -211,21 +193,12 @@ define @vmulhu_vx_nxv4i32( %va, i32 %x) { } define @vmulhu_vi_nxv4i32_0( %va) { -; RV32-LABEL: vmulhu_vi_nxv4i32_0: -; RV32: # %bb.0: -; RV32-NEXT: li a0, -7 -; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, ma -; RV32-NEXT: vmulhu.vx v8, v8, a0 -; RV32-NEXT: ret -; -; RV64-LABEL: vmulhu_vi_nxv4i32_0: -; RV64: # %bb.0: -; RV64-NEXT: li a0, 1 -; RV64-NEXT: slli a0, a0, 32 -; RV64-NEXT: addi a0, a0, -7 -; RV64-NEXT: vsetvli a1, zero, e32, m2, ta, ma -; RV64-NEXT: vmulhu.vx v8, v8, a0 -; RV64-NEXT: ret +; CHECK-LABEL: vmulhu_vi_nxv4i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: li a0, -7 +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma +; CHECK-NEXT: vmulhu.vx v8, v8, a0 +; CHECK-NEXT: ret %head1 = insertelement poison, i32 -7, i32 0 %splat1 = shufflevector %head1, poison, zeroinitializer %vb = zext %splat1 to @@ -298,21 +271,12 @@ define @vmulhu_vx_nxv8i32( %va, i32 %x) { } define @vmulhu_vi_nxv8i32_0( %va) { -; RV32-LABEL: vmulhu_vi_nxv8i32_0: -; RV32: # %bb.0: -; RV32-NEXT: li a0, -7 -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma -; RV32-NEXT: vmulhu.vx v8, v8, a0 -; RV32-NEXT: ret -; -; RV64-LABEL: vmulhu_vi_nxv8i32_0: -; RV64: # %bb.0: -; RV64-NEXT: li a0, 1 -; RV64-NEXT: slli a0, a0, 32 -; RV64-NEXT: addi a0, a0, -7 -; RV64-NEXT: vsetvli a1, zero, e32, m4, ta, ma -; RV64-NEXT: vmulhu.vx v8, v8, a0 -; RV64-NEXT: ret +; CHECK-LABEL: 
vmulhu_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: li a0, -7 +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma +; CHECK-NEXT: vmulhu.vx v8, v8, a0 +; CHECK-NEXT: ret %head1 = insertelement poison, i32 -7, i32 0 %splat1 = shufflevector %head1, poison, zeroinitializer %vb = zext %splat1 to diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll index ba7f3e801aa07..6a8fe57f23f66 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll @@ -852,8 +852,7 @@ define signext i32 @vpreduce_umax_nxv1i32(i32 signext %s, %v, ; ; RV64-LABEL: vpreduce_umax_nxv1i32: ; RV64: # %bb.0: -; RV64-NEXT: slli a0, a0, 32 -; RV64-NEXT: srli a0, a0, 32 +; RV64-NEXT: andi a0, a0, -1 ; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e32, mf2, ta, ma @@ -893,8 +892,7 @@ define signext i32 @vpreduce_umin_nxv1i32(i32 signext %s, %v, ; ; RV64-LABEL: vpreduce_umin_nxv1i32: ; RV64: # %bb.0: -; RV64-NEXT: slli a0, a0, 32 -; RV64-NEXT: srli a0, a0, 32 +; RV64-NEXT: andi a0, a0, -1 ; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e32, mf2, ta, ma @@ -994,8 +992,7 @@ define signext i32 @vpreduce_umax_nxv2i32(i32 signext %s, %v, ; ; RV64-LABEL: vpreduce_umax_nxv2i32: ; RV64: # %bb.0: -; RV64-NEXT: slli a0, a0, 32 -; RV64-NEXT: srli a0, a0, 32 +; RV64-NEXT: andi a0, a0, -1 ; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, ma @@ -1035,8 +1032,7 @@ define signext i32 @vpreduce_umin_nxv2i32(i32 signext %s, %v, ; ; RV64-LABEL: vpreduce_umin_nxv2i32: ; RV64: # %bb.0: -; RV64-NEXT: slli a0, a0, 32 -; RV64-NEXT: srli a0, a0, 32 +; RV64-NEXT: andi a0, a0, -1 ; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, ma @@ -1136,8 +1132,7 @@ define signext i32 @vpreduce_umax_nxv4i32(i32 signext %s, %v, ; ; RV64-LABEL: vpreduce_umax_nxv4i32: ; RV64: # %bb.0: -; RV64-NEXT: slli a0, a0, 32 -; RV64-NEXT: srli a0, a0, 32 +; RV64-NEXT: andi a0, a0, -1 ; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, a0 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma @@ -1182,8 +1177,7 @@ define signext i32 @vpreduce_umax_nxv32i32(i32 signext %s, % ; RV64-NEXT: srli a2, a3, 2 ; RV64-NEXT: vsetvli a4, zero, e8, mf2, ta, ma ; RV64-NEXT: vslidedown.vx v24, v0, a2 -; RV64-NEXT: slli a0, a0, 32 -; RV64-NEXT: srli a2, a0, 32 +; RV64-NEXT: andi a2, a0, -1 ; RV64-NEXT: slli a3, a3, 1 ; RV64-NEXT: sub a0, a1, a3 ; RV64-NEXT: sltu a4, a1, a0 @@ -1235,8 +1229,7 @@ define signext i32 @vpreduce_umin_nxv4i32(i32 signext %s, %v, ; ; RV64-LABEL: vpreduce_umin_nxv4i32: ; RV64: # %bb.0: -; RV64-NEXT: slli a0, a0, 32 -; RV64-NEXT: srli a0, a0, 32 +; RV64-NEXT: andi a0, a0, -1 ; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, a0 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma From 2f0aa7d23f7cca0794880a3861568b7439153211 Mon Sep 17 00:00:00 2001 From: Luke Lau Date: Tue, 26 Sep 2023 17:14:04 +0100 Subject: [PATCH 2/5] Remove VL TODO and unnecessary _VI cases --- llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp | 25 --------------------- llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp | 25 --------------------- 2 files changed, 50 deletions(-) diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp index 6925ab557e477..36ba1fdd3e3b9 100644 
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp @@ -2775,8 +2775,6 @@ static bool vectorPseudoHasAllNBitUsers(SDNode *User, unsigned UserOpNo, User->getNumOperands() - HasVecPolicyOp - HasChainOp - HasGlueOp - 2; const unsigned Log2SEW = User->getConstantOperandVal(VLIdx + 1); - // TODO: The Largest VL 65,536 occurs for LMUL=8 and SEW=8 with - // VLEN=65,536. We could check if Bits < 16 here. if (UserOpNo == VLIdx) return false; @@ -2787,16 +2785,11 @@ static bool vectorPseudoHasAllNBitUsers(SDNode *User, unsigned UserOpNo, // 11.6. Vector Single-Width Shift Instructions case RISCV::VSLL_VX: - case RISCV::VSLL_VI: case RISCV::VSRL_VX: - case RISCV::VSRL_VI: case RISCV::VSRA_VX: - case RISCV::VSRA_VI: // 12.4. Vector Single-Width Scaling Shift Instructions case RISCV::VSSRL_VX: - case RISCV::VSSRL_VI: case RISCV::VSSRA_VX: - case RISCV::VSSRA_VI: // Only the low lg2(SEW) bits of the shift-amount value are used. if (Bits < Log2SEW) return false; @@ -2804,14 +2797,10 @@ static bool vectorPseudoHasAllNBitUsers(SDNode *User, unsigned UserOpNo, // 11.7 Vector Narrowing Integer Right Shift Instructions case RISCV::VNSRL_WX: - case RISCV::VNSRL_WI: case RISCV::VNSRA_WX: - case RISCV::VNSRA_WI: // 12.5. Vector Narrowing Fixed-Point Clip Instructions case RISCV::VNCLIPU_WX: - case RISCV::VNCLIPU_WI: case RISCV::VNCLIP_WX: - case RISCV::VNCLIP_WI: // Only the low lg2(2*SEW) bits of the shift-amount value are used. if (Bits < Log2SEW + 1) return false; @@ -2819,10 +2808,8 @@ static bool vectorPseudoHasAllNBitUsers(SDNode *User, unsigned UserOpNo, // 11.1. Vector Single-Width Integer Add and Subtract case RISCV::VADD_VX: - case RISCV::VADD_VI: case RISCV::VSUB_VX: case RISCV::VRSUB_VX: - case RISCV::VRSUB_VI: // 11.2. Vector Widening Integer Add/Subtract case RISCV::VWADDU_VX: case RISCV::VWSUBU_VX: @@ -2838,32 +2825,22 @@ static bool vectorPseudoHasAllNBitUsers(SDNode *User, unsigned UserOpNo, case RISCV::VMADC_VXM: case RISCV::VMADC_VIM: case RISCV::VMADC_VX: - case RISCV::VMADC_VI: case RISCV::VSBC_VXM: case RISCV::VMSBC_VXM: case RISCV::VMSBC_VX: // 11.5 Vector Bitwise Logical Instructions case RISCV::VAND_VX: - case RISCV::VAND_VI: case RISCV::VOR_VX: - case RISCV::VOR_VI: case RISCV::VXOR_VX: - case RISCV::VXOR_VI: // 11.8. Vector Integer Compare Instructions case RISCV::VMSEQ_VX: - case RISCV::VMSEQ_VI: case RISCV::VMSNE_VX: - case RISCV::VMSNE_VI: case RISCV::VMSLTU_VX: case RISCV::VMSLT_VX: case RISCV::VMSLEU_VX: - case RISCV::VMSLEU_VI: case RISCV::VMSLE_VX: - case RISCV::VMSLE_VI: case RISCV::VMSGTU_VX: - case RISCV::VMSGTU_VI: case RISCV::VMSGT_VX: - case RISCV::VMSGT_VI: // 11.9. Vector Integer Min/Max Instructions case RISCV::VMINU_VX: case RISCV::VMIN_VX: @@ -2901,9 +2878,7 @@ static bool vectorPseudoHasAllNBitUsers(SDNode *User, unsigned UserOpNo, case RISCV::VMV_V_I: // 12.1. Vector Single-Width Saturating Add and Subtract case RISCV::VSADDU_VX: - case RISCV::VSADDU_VI: case RISCV::VSADD_VX: - case RISCV::VSADD_VI: case RISCV::VSSUBU_VX: case RISCV::VSSUB_VX: // 12.2. 
Vector Single-Width Averaging Add and Subtract diff --git a/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp b/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp index f6353aa723c2f..38417d2e9c951 100644 --- a/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp +++ b/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp @@ -93,8 +93,6 @@ static bool vectorPseudoHasAllNBitUsers(const MachineOperand &UserOp, assert(RISCVII::hasVLOp(TSFlags)); const unsigned Log2SEW = MI.getOperand(RISCVII::getSEWOpNum(MCID)).getImm(); - // TODO: The Largest VL 65,536 occurs for LMUL=8 and SEW=8 with - // VLEN=65,536. We could check if Bits < 16 here. if (UserOp.getOperandNo() == RISCVII::getVLOpNum(MCID)) return false; @@ -105,16 +103,11 @@ static bool vectorPseudoHasAllNBitUsers(const MachineOperand &UserOp, // 11.6. Vector Single-Width Shift Instructions case RISCV::VSLL_VX: - case RISCV::VSLL_VI: case RISCV::VSRL_VX: - case RISCV::VSRL_VI: case RISCV::VSRA_VX: - case RISCV::VSRA_VI: // 12.4. Vector Single-Width Scaling Shift Instructions case RISCV::VSSRL_VX: - case RISCV::VSSRL_VI: case RISCV::VSSRA_VX: - case RISCV::VSSRA_VI: // Only the low lg2(SEW) bits of the shift-amount value are used. if (Bits < Log2SEW) return false; @@ -122,14 +115,10 @@ static bool vectorPseudoHasAllNBitUsers(const MachineOperand &UserOp, // 11.7 Vector Narrowing Integer Right Shift Instructions case RISCV::VNSRL_WX: - case RISCV::VNSRL_WI: case RISCV::VNSRA_WX: - case RISCV::VNSRA_WI: // 12.5. Vector Narrowing Fixed-Point Clip Instructions case RISCV::VNCLIPU_WX: - case RISCV::VNCLIPU_WI: case RISCV::VNCLIP_WX: - case RISCV::VNCLIP_WI: // Only the low lg2(2*SEW) bits of the shift-amount value are used. if (Bits < Log2SEW + 1) return false; @@ -137,10 +126,8 @@ static bool vectorPseudoHasAllNBitUsers(const MachineOperand &UserOp, // 11.1. Vector Single-Width Integer Add and Subtract case RISCV::VADD_VX: - case RISCV::VADD_VI: case RISCV::VSUB_VX: case RISCV::VRSUB_VX: - case RISCV::VRSUB_VI: // 11.2. Vector Widening Integer Add/Subtract case RISCV::VWADDU_VX: case RISCV::VWSUBU_VX: @@ -156,32 +143,22 @@ static bool vectorPseudoHasAllNBitUsers(const MachineOperand &UserOp, case RISCV::VMADC_VXM: case RISCV::VMADC_VIM: case RISCV::VMADC_VX: - case RISCV::VMADC_VI: case RISCV::VSBC_VXM: case RISCV::VMSBC_VXM: case RISCV::VMSBC_VX: // 11.5 Vector Bitwise Logical Instructions case RISCV::VAND_VX: - case RISCV::VAND_VI: case RISCV::VOR_VX: - case RISCV::VOR_VI: case RISCV::VXOR_VX: - case RISCV::VXOR_VI: // 11.8. Vector Integer Compare Instructions case RISCV::VMSEQ_VX: - case RISCV::VMSEQ_VI: case RISCV::VMSNE_VX: - case RISCV::VMSNE_VI: case RISCV::VMSLTU_VX: case RISCV::VMSLT_VX: case RISCV::VMSLEU_VX: - case RISCV::VMSLEU_VI: case RISCV::VMSLE_VX: - case RISCV::VMSLE_VI: case RISCV::VMSGTU_VX: - case RISCV::VMSGTU_VI: case RISCV::VMSGT_VX: - case RISCV::VMSGT_VI: // 11.9. Vector Integer Min/Max Instructions case RISCV::VMINU_VX: case RISCV::VMIN_VX: @@ -219,9 +196,7 @@ static bool vectorPseudoHasAllNBitUsers(const MachineOperand &UserOp, case RISCV::VMV_V_I: // 12.1. Vector Single-Width Saturating Add and Subtract case RISCV::VSADDU_VX: - case RISCV::VSADDU_VI: case RISCV::VSADD_VX: - case RISCV::VSADD_VI: case RISCV::VSSUBU_VX: case RISCV::VSSUB_VX: // 12.2. 
Vector Single-Width Averaging Add and Subtract From f5340e86a437be33e647fed0153768c4762864a7 Mon Sep 17 00:00:00 2001 From: Luke Lau Date: Tue, 26 Sep 2023 17:29:07 +0100 Subject: [PATCH 3/5] Remove vmerge_vim and vmv_v_i cases --- llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp | 2 -- llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp | 2 -- 2 files changed, 4 deletions(-) diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp index 36ba1fdd3e3b9..78f90722cc650 100644 --- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp @@ -2872,10 +2872,8 @@ static bool vectorPseudoHasAllNBitUsers(SDNode *User, unsigned UserOpNo, case RISCV::VWMACCUS_VX: // 11.15. Vector Integer Merge Instructions case RISCV::VMERGE_VXM: - case RISCV::VMERGE_VIM: // 11.16. Vector Integer Move Instructions case RISCV::VMV_V_X: - case RISCV::VMV_V_I: // 12.1. Vector Single-Width Saturating Add and Subtract case RISCV::VSADDU_VX: case RISCV::VSADD_VX: diff --git a/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp b/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp index 38417d2e9c951..509c9336ed289 100644 --- a/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp +++ b/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp @@ -190,10 +190,8 @@ static bool vectorPseudoHasAllNBitUsers(const MachineOperand &UserOp, case RISCV::VWMACCUS_VX: // 11.15. Vector Integer Merge Instructions case RISCV::VMERGE_VXM: - case RISCV::VMERGE_VIM: // 11.16. Vector Integer Move Instructions case RISCV::VMV_V_X: - case RISCV::VMV_V_I: // 12.1. Vector Single-Width Saturating Add and Subtract case RISCV::VSADDU_VX: case RISCV::VSADD_VX: From 6f168fd6ba999b202c85789dedfb713df013aef2 Mon Sep 17 00:00:00 2001 From: Luke Lau Date: Wed, 27 Sep 2023 12:20:54 +0100 Subject: [PATCH 4/5] Move switch into RISCVBaseInfo.{h,cpp} --- .../RISCV/MCTargetDesc/RISCVBaseInfo.cpp | 112 ++++++++++++ .../Target/RISCV/MCTargetDesc/RISCVBaseInfo.h | 5 + llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp | 170 +++--------------- llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp | 154 ++-------------- 4 files changed, 158 insertions(+), 283 deletions(-) diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.cpp index 0a42c6faee290..95cea0c61acfd 100644 --- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.cpp +++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.cpp @@ -130,6 +130,118 @@ parseFeatureBits(bool IsRV64, const FeatureBitset &FeatureBits) { } // namespace RISCVFeatures +bool RISCVII::vectorInstUsesNBitsOfScalarOp(uint16_t Opcode, unsigned Bits, + unsigned Log2SEW) { + // TODO: Handle Zvbb instructions + switch (Opcode) { + default: + return false; + + // 11.6. Vector Single-Width Shift Instructions + case RISCV::VSLL_VX: + case RISCV::VSRL_VX: + case RISCV::VSRA_VX: + // 12.4. Vector Single-Width Scaling Shift Instructions + case RISCV::VSSRL_VX: + case RISCV::VSSRA_VX: + // Only the low lg2(SEW) bits of the shift-amount value are used. + return Log2SEW <= Bits; + + // 11.7 Vector Narrowing Integer Right Shift Instructions + case RISCV::VNSRL_WX: + case RISCV::VNSRA_WX: + // 12.5. Vector Narrowing Fixed-Point Clip Instructions + case RISCV::VNCLIPU_WX: + case RISCV::VNCLIP_WX: + // Only the low lg2(2*SEW) bits of the shift-amount value are used. + return (Log2SEW + 1) <= Bits; + + // 11.1. Vector Single-Width Integer Add and Subtract + case RISCV::VADD_VX: + case RISCV::VSUB_VX: + case RISCV::VRSUB_VX: + // 11.2. 
Vector Widening Integer Add/Subtract + case RISCV::VWADDU_VX: + case RISCV::VWSUBU_VX: + case RISCV::VWADD_VX: + case RISCV::VWSUB_VX: + case RISCV::VWADDU_WX: + case RISCV::VWSUBU_WX: + case RISCV::VWADD_WX: + case RISCV::VWSUB_WX: + // 11.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions + case RISCV::VADC_VXM: + case RISCV::VADC_VIM: + case RISCV::VMADC_VXM: + case RISCV::VMADC_VIM: + case RISCV::VMADC_VX: + case RISCV::VSBC_VXM: + case RISCV::VMSBC_VXM: + case RISCV::VMSBC_VX: + // 11.5 Vector Bitwise Logical Instructions + case RISCV::VAND_VX: + case RISCV::VOR_VX: + case RISCV::VXOR_VX: + // 11.8. Vector Integer Compare Instructions + case RISCV::VMSEQ_VX: + case RISCV::VMSNE_VX: + case RISCV::VMSLTU_VX: + case RISCV::VMSLT_VX: + case RISCV::VMSLEU_VX: + case RISCV::VMSLE_VX: + case RISCV::VMSGTU_VX: + case RISCV::VMSGT_VX: + // 11.9. Vector Integer Min/Max Instructions + case RISCV::VMINU_VX: + case RISCV::VMIN_VX: + case RISCV::VMAXU_VX: + case RISCV::VMAX_VX: + // 11.10. Vector Single-Width Integer Multiply Instructions + case RISCV::VMUL_VX: + case RISCV::VMULH_VX: + case RISCV::VMULHU_VX: + case RISCV::VMULHSU_VX: + // 11.11. Vector Integer Divide Instructions + case RISCV::VDIVU_VX: + case RISCV::VDIV_VX: + case RISCV::VREMU_VX: + case RISCV::VREM_VX: + // 11.12. Vector Widening Integer Multiply Instructions + case RISCV::VWMUL_VX: + case RISCV::VWMULU_VX: + case RISCV::VWMULSU_VX: + // 11.13. Vector Single-Width Integer Multiply-Add Instructions + case RISCV::VMACC_VX: + case RISCV::VNMSAC_VX: + case RISCV::VMADD_VX: + case RISCV::VNMSUB_VX: + // 11.14. Vector Widening Integer Multiply-Add Instructions + case RISCV::VWMACCU_VX: + case RISCV::VWMACC_VX: + case RISCV::VWMACCSU_VX: + case RISCV::VWMACCUS_VX: + // 11.15. Vector Integer Merge Instructions + case RISCV::VMERGE_VXM: + // 11.16. Vector Integer Move Instructions + case RISCV::VMV_V_X: + // 12.1. Vector Single-Width Saturating Add and Subtract + case RISCV::VSADDU_VX: + case RISCV::VSADD_VX: + case RISCV::VSSUBU_VX: + case RISCV::VSSUB_VX: + // 12.2. Vector Single-Width Averaging Add and Subtract + case RISCV::VAADDU_VX: + case RISCV::VAADD_VX: + case RISCV::VASUBU_VX: + case RISCV::VASUB_VX: + // 12.3. Vector Single-Width Fractional Multiply with Rounding and Saturation + case RISCV::VSMUL_VX: + // 16.1. Integer Scalar Move Instructions + case RISCV::VMV_S_X: + return (1 << Log2SEW) <= Bits; + } +} + // Encode VTYPE into the binary format used by the the VSETVLI instruction which // is used by our MC layer representation. // diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h index 20ff26a39dc3b..222d4e9eef674 100644 --- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h +++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h @@ -241,6 +241,11 @@ static inline bool isFirstDefTiedToFirstUse(const MCInstrDesc &Desc) { Desc.getOperandConstraint(Desc.getNumDefs(), MCOI::TIED_TO) == 0; } +// Returns true if the .vx vector instruction \p Opcode only uses the lower \p +// Bits for a given SEW. 
+bool vectorInstUsesNBitsOfScalarOp(uint16_t Opcode, unsigned Bits, + unsigned Log2SEW); + // RISC-V Specific Machine Operand Flags enum { MO_None = 0, diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp index 78f90722cc650..0b6de2b207ec3 100644 --- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp @@ -2752,148 +2752,6 @@ bool RISCVDAGToDAGISel::selectSHXADD_UWOp(SDValue N, unsigned ShAmt, return false; } -static bool vectorPseudoHasAllNBitUsers(SDNode *User, unsigned UserOpNo, - unsigned Bits, - const TargetInstrInfo *TII) { - const RISCVVPseudosTable::PseudoInfo *PseudoInfo = - RISCVVPseudosTable::getPseudoInfo(User->getMachineOpcode()); - - if (!PseudoInfo) - return false; - - const MCInstrDesc &MCID = TII->get(User->getMachineOpcode()); - const uint64_t TSFlags = MCID.TSFlags; - if (!RISCVII::hasSEWOp(TSFlags)) - return false; - assert(RISCVII::hasVLOp(TSFlags)); - - bool HasGlueOp = User->getGluedNode() != nullptr; - unsigned ChainOpIdx = User->getNumOperands() - HasGlueOp - 1; - bool HasChainOp = User->getOperand(ChainOpIdx).getValueType() == MVT::Other; - bool HasVecPolicyOp = RISCVII::hasVecPolicyOp(TSFlags); - unsigned VLIdx = - User->getNumOperands() - HasVecPolicyOp - HasChainOp - HasGlueOp - 2; - const unsigned Log2SEW = User->getConstantOperandVal(VLIdx + 1); - - if (UserOpNo == VLIdx) - return false; - - // TODO: Handle Zvbb instructions - switch (PseudoInfo->BaseInstr) { - default: - return false; - - // 11.6. Vector Single-Width Shift Instructions - case RISCV::VSLL_VX: - case RISCV::VSRL_VX: - case RISCV::VSRA_VX: - // 12.4. Vector Single-Width Scaling Shift Instructions - case RISCV::VSSRL_VX: - case RISCV::VSSRA_VX: - // Only the low lg2(SEW) bits of the shift-amount value are used. - if (Bits < Log2SEW) - return false; - break; - - // 11.7 Vector Narrowing Integer Right Shift Instructions - case RISCV::VNSRL_WX: - case RISCV::VNSRA_WX: - // 12.5. Vector Narrowing Fixed-Point Clip Instructions - case RISCV::VNCLIPU_WX: - case RISCV::VNCLIP_WX: - // Only the low lg2(2*SEW) bits of the shift-amount value are used. - if (Bits < Log2SEW + 1) - return false; - break; - - // 11.1. Vector Single-Width Integer Add and Subtract - case RISCV::VADD_VX: - case RISCV::VSUB_VX: - case RISCV::VRSUB_VX: - // 11.2. Vector Widening Integer Add/Subtract - case RISCV::VWADDU_VX: - case RISCV::VWSUBU_VX: - case RISCV::VWADD_VX: - case RISCV::VWSUB_VX: - case RISCV::VWADDU_WX: - case RISCV::VWSUBU_WX: - case RISCV::VWADD_WX: - case RISCV::VWSUB_WX: - // 11.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions - case RISCV::VADC_VXM: - case RISCV::VADC_VIM: - case RISCV::VMADC_VXM: - case RISCV::VMADC_VIM: - case RISCV::VMADC_VX: - case RISCV::VSBC_VXM: - case RISCV::VMSBC_VXM: - case RISCV::VMSBC_VX: - // 11.5 Vector Bitwise Logical Instructions - case RISCV::VAND_VX: - case RISCV::VOR_VX: - case RISCV::VXOR_VX: - // 11.8. Vector Integer Compare Instructions - case RISCV::VMSEQ_VX: - case RISCV::VMSNE_VX: - case RISCV::VMSLTU_VX: - case RISCV::VMSLT_VX: - case RISCV::VMSLEU_VX: - case RISCV::VMSLE_VX: - case RISCV::VMSGTU_VX: - case RISCV::VMSGT_VX: - // 11.9. Vector Integer Min/Max Instructions - case RISCV::VMINU_VX: - case RISCV::VMIN_VX: - case RISCV::VMAXU_VX: - case RISCV::VMAX_VX: - // 11.10. Vector Single-Width Integer Multiply Instructions - case RISCV::VMUL_VX: - case RISCV::VMULH_VX: - case RISCV::VMULHU_VX: - case RISCV::VMULHSU_VX: - // 11.11. 
Vector Integer Divide Instructions - case RISCV::VDIVU_VX: - case RISCV::VDIV_VX: - case RISCV::VREMU_VX: - case RISCV::VREM_VX: - // 11.12. Vector Widening Integer Multiply Instructions - case RISCV::VWMUL_VX: - case RISCV::VWMULU_VX: - case RISCV::VWMULSU_VX: - // 11.13. Vector Single-Width Integer Multiply-Add Instructions - case RISCV::VMACC_VX: - case RISCV::VNMSAC_VX: - case RISCV::VMADD_VX: - case RISCV::VNMSUB_VX: - // 11.14. Vector Widening Integer Multiply-Add Instructions - case RISCV::VWMACCU_VX: - case RISCV::VWMACC_VX: - case RISCV::VWMACCSU_VX: - case RISCV::VWMACCUS_VX: - // 11.15. Vector Integer Merge Instructions - case RISCV::VMERGE_VXM: - // 11.16. Vector Integer Move Instructions - case RISCV::VMV_V_X: - // 12.1. Vector Single-Width Saturating Add and Subtract - case RISCV::VSADDU_VX: - case RISCV::VSADD_VX: - case RISCV::VSSUBU_VX: - case RISCV::VSSUB_VX: - // 12.2. Vector Single-Width Averaging Add and Subtract - case RISCV::VAADDU_VX: - case RISCV::VAADD_VX: - case RISCV::VASUBU_VX: - case RISCV::VASUB_VX: - // 12.3. Vector Single-Width Fractional Multiply with Rounding and Saturation - case RISCV::VSMUL_VX: - // 16.1. Integer Scalar Move Instructions - case RISCV::VMV_S_X: - if (Bits < (1 << Log2SEW)) - return false; - } - return true; -} - // Return true if all users of this SDNode* only consume the lower \p Bits. // This can be used to form W instructions for add/sub/mul/shl even when the // root isn't a sext_inreg. This can allow the ADDW/SUBW/MULW/SLLIW to CSE if @@ -2924,10 +2782,32 @@ bool RISCVDAGToDAGISel::hasAllNBitUsers(SDNode *Node, unsigned Bits, // TODO: Add more opcodes? switch (User->getMachineOpcode()) { - default: - if (vectorPseudoHasAllNBitUsers(User, UI.getOperandNo(), Bits, TII)) - break; + default: { + if (const RISCVVPseudosTable::PseudoInfo *PseudoInfo = + RISCVVPseudosTable::getPseudoInfo(User->getMachineOpcode())) { + + const MCInstrDesc &MCID = TII->get(User->getMachineOpcode()); + if (!RISCVII::hasSEWOp(MCID.TSFlags)) + return false; + assert(RISCVII::hasVLOp(MCID.TSFlags)); + + bool HasGlueOp = User->getGluedNode() != nullptr; + unsigned ChainOpIdx = User->getNumOperands() - HasGlueOp - 1; + bool HasChainOp = + User->getOperand(ChainOpIdx).getValueType() == MVT::Other; + bool HasVecPolicyOp = RISCVII::hasVecPolicyOp(MCID.TSFlags); + unsigned VLIdx = User->getNumOperands() - HasVecPolicyOp - HasChainOp - + HasGlueOp - 2; + const unsigned Log2SEW = User->getConstantOperandVal(VLIdx + 1); + + if (UI.getOperandNo() == VLIdx) + return false; + if (RISCVII::vectorInstUsesNBitsOfScalarOp(PseudoInfo->BaseInstr, Bits, + Log2SEW)) + break; + } return false; + } case RISCV::ADDW: case RISCV::ADDIW: case RISCV::SUBW: diff --git a/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp b/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp index 509c9336ed289..1e4ce843bd913 100644 --- a/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp +++ b/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp @@ -77,141 +77,6 @@ FunctionPass *llvm::createRISCVOptWInstrsPass() { return new RISCVOptWInstrs(); } -static bool vectorPseudoHasAllNBitUsers(const MachineOperand &UserOp, - unsigned Bits) { - const MachineInstr &MI = *UserOp.getParent(); - const RISCVVPseudosTable::PseudoInfo *PseudoInfo = - RISCVVPseudosTable::getPseudoInfo(MI.getOpcode()); - - if (!PseudoInfo) - return false; - - const MCInstrDesc &MCID = MI.getDesc(); - const uint64_t TSFlags = MI.getDesc().TSFlags; - if (!RISCVII::hasSEWOp(TSFlags)) - return false; - assert(RISCVII::hasVLOp(TSFlags)); - const unsigned Log2SEW = 
-      MI.getOperand(RISCVII::getSEWOpNum(MCID)).getImm();
-
-  if (UserOp.getOperandNo() == RISCVII::getVLOpNum(MCID))
-    return false;
-
-  // TODO: Handle Zvbb instructions
-  switch (PseudoInfo->BaseInstr) {
-  default:
-    return false;
-
-  // 11.6. Vector Single-Width Shift Instructions
-  case RISCV::VSLL_VX:
-  case RISCV::VSRL_VX:
-  case RISCV::VSRA_VX:
-  // 12.4. Vector Single-Width Scaling Shift Instructions
-  case RISCV::VSSRL_VX:
-  case RISCV::VSSRA_VX:
-    // Only the low lg2(SEW) bits of the shift-amount value are used.
-    if (Bits < Log2SEW)
-      return false;
-    break;
-
-  // 11.7 Vector Narrowing Integer Right Shift Instructions
-  case RISCV::VNSRL_WX:
-  case RISCV::VNSRA_WX:
-  // 12.5. Vector Narrowing Fixed-Point Clip Instructions
-  case RISCV::VNCLIPU_WX:
-  case RISCV::VNCLIP_WX:
-    // Only the low lg2(2*SEW) bits of the shift-amount value are used.
-    if (Bits < Log2SEW + 1)
-      return false;
-    break;
-
-  // 11.1. Vector Single-Width Integer Add and Subtract
-  case RISCV::VADD_VX:
-  case RISCV::VSUB_VX:
-  case RISCV::VRSUB_VX:
-  // 11.2. Vector Widening Integer Add/Subtract
-  case RISCV::VWADDU_VX:
-  case RISCV::VWSUBU_VX:
-  case RISCV::VWADD_VX:
-  case RISCV::VWSUB_VX:
-  case RISCV::VWADDU_WX:
-  case RISCV::VWSUBU_WX:
-  case RISCV::VWADD_WX:
-  case RISCV::VWSUB_WX:
-  // 11.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
-  case RISCV::VADC_VXM:
-  case RISCV::VADC_VIM:
-  case RISCV::VMADC_VXM:
-  case RISCV::VMADC_VIM:
-  case RISCV::VMADC_VX:
-  case RISCV::VSBC_VXM:
-  case RISCV::VMSBC_VXM:
-  case RISCV::VMSBC_VX:
-  // 11.5 Vector Bitwise Logical Instructions
-  case RISCV::VAND_VX:
-  case RISCV::VOR_VX:
-  case RISCV::VXOR_VX:
-  // 11.8. Vector Integer Compare Instructions
-  case RISCV::VMSEQ_VX:
-  case RISCV::VMSNE_VX:
-  case RISCV::VMSLTU_VX:
-  case RISCV::VMSLT_VX:
-  case RISCV::VMSLEU_VX:
-  case RISCV::VMSLE_VX:
-  case RISCV::VMSGTU_VX:
-  case RISCV::VMSGT_VX:
-  // 11.9. Vector Integer Min/Max Instructions
-  case RISCV::VMINU_VX:
-  case RISCV::VMIN_VX:
-  case RISCV::VMAXU_VX:
-  case RISCV::VMAX_VX:
-  // 11.10. Vector Single-Width Integer Multiply Instructions
-  case RISCV::VMUL_VX:
-  case RISCV::VMULH_VX:
-  case RISCV::VMULHU_VX:
-  case RISCV::VMULHSU_VX:
-  // 11.11. Vector Integer Divide Instructions
-  case RISCV::VDIVU_VX:
-  case RISCV::VDIV_VX:
-  case RISCV::VREMU_VX:
-  case RISCV::VREM_VX:
-  // 11.12. Vector Widening Integer Multiply Instructions
-  case RISCV::VWMUL_VX:
-  case RISCV::VWMULU_VX:
-  case RISCV::VWMULSU_VX:
-  // 11.13. Vector Single-Width Integer Multiply-Add Instructions
-  case RISCV::VMACC_VX:
-  case RISCV::VNMSAC_VX:
-  case RISCV::VMADD_VX:
-  case RISCV::VNMSUB_VX:
-  // 11.14. Vector Widening Integer Multiply-Add Instructions
-  case RISCV::VWMACCU_VX:
-  case RISCV::VWMACC_VX:
-  case RISCV::VWMACCSU_VX:
-  case RISCV::VWMACCUS_VX:
-  // 11.15. Vector Integer Merge Instructions
-  case RISCV::VMERGE_VXM:
-  // 11.16. Vector Integer Move Instructions
-  case RISCV::VMV_V_X:
-  // 12.1. Vector Single-Width Saturating Add and Subtract
-  case RISCV::VSADDU_VX:
-  case RISCV::VSADD_VX:
-  case RISCV::VSSUBU_VX:
-  case RISCV::VSSUB_VX:
-  // 12.2. Vector Single-Width Averaging Add and Subtract
-  case RISCV::VAADDU_VX:
-  case RISCV::VAADD_VX:
-  case RISCV::VASUBU_VX:
-  case RISCV::VASUB_VX:
-  // 12.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
-  case RISCV::VSMUL_VX:
-  // 16.1. Integer Scalar Move Instructions
-  case RISCV::VMV_S_X:
-    if (Bits < (1 << Log2SEW))
-      return false;
-  }
-  return true;
-}
-
 // Checks if all users only demand the lower \p OrigBits of the original
 // instruction's result.
 // TODO: handle multiple interdependent transformations
@@ -241,10 +106,23 @@ static bool hasAllNBitUsers(const MachineInstr &OrigMI,
     unsigned OpIdx = UserOp.getOperandNo();
 
     switch (UserMI->getOpcode()) {
-    default:
-      if (vectorPseudoHasAllNBitUsers(UserOp, Bits))
-        break;
+    default: {
+      if (const RISCVVPseudosTable::PseudoInfo *PseudoInfo =
+              RISCVVPseudosTable::getPseudoInfo(UserMI->getOpcode())) {
+        const MCInstrDesc &MCID = UserMI->getDesc();
+        if (!RISCVII::hasSEWOp(MCID.TSFlags))
+          return false;
+        assert(RISCVII::hasVLOp(MCID.TSFlags));
+        const unsigned Log2SEW =
+            UserMI->getOperand(RISCVII::getSEWOpNum(MCID)).getImm();
+        if (UserOp.getOperandNo() == RISCVII::getVLOpNum(MCID))
+          return false;
+        if (RISCVII::vectorInstUsesNBitsOfScalarOp(PseudoInfo->BaseInstr,
+                                                   Bits, Log2SEW))
+          break;
+      }
       return false;
+    }
 
     case RISCV::ADDIW:
     case RISCV::ADDW:

From caf82f1fc872f9d118df22e86cf24fd2e1edc122 Mon Sep 17 00:00:00 2001
From: Luke Lau
Date: Wed, 27 Sep 2023 12:25:19 +0100
Subject: [PATCH 5/5] Remove unused test prefixes

---
 llvm/test/CodeGen/RISCV/rvv/constant-folding.ll              | 9 ++-------
 .../CodeGen/RISCV/rvv/fixed-vectors-deinterleave-load.ll     | 7 ++-----
 llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll     | 7 ++-----
 llvm/test/CodeGen/RISCV/rvv/vmulh-sdnode.ll                  | 7 ++-----
 4 files changed, 8 insertions(+), 22 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll b/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll
index 98bc4081b3a34..b3f561a52f411 100644
--- a/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll
@@ -1,8 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s --check-prefixes=CHECK,RV32
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
 
 ; These tests check that the scalable-vector version of this series of
 ; instructions does not get into an infinite DAGCombine loop. This was
@@ -52,6 +50,3 @@ define @scalable( %x) {
   %v48 = and %v44, %v47
   ret %v48
 }
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; RV32: {{.*}}
-; RV64: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-deinterleave-load.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-deinterleave-load.ll
index 8e298d962edf1..25177734325ce 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-deinterleave-load.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-deinterleave-load.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zfh,+zvfh | FileCheck %s -check-prefixes=CHECK,RV32
-; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zfh,+zvfh | FileCheck %s -check-prefixes=CHECK,RV64
+; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zfh,+zvfh | FileCheck %s
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zfh,+zvfh | FileCheck %s
 
 ; Integers
 
@@ -179,6 +179,3 @@ declare {<2 x float>, <2 x float>} @llvm.experimental.vector.deinterleave2.v4f32
 declare {<8 x half>, <8 x half>} @llvm.experimental.vector.deinterleave2.v16f16(<16 x half>)
 declare {<4 x float>, <4 x float>} @llvm.experimental.vector.deinterleave2.v8f32(<8 x float>)
 declare {<2 x double>, <2 x double>} @llvm.experimental.vector.deinterleave2.v4f64(<4 x double>)
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; RV32: {{.*}}
-; RV64: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll
index 5be8530190b2f..f7ccf2c32cde0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zfh,+zvfh | FileCheck --check-prefixes=CHECK,RV32 %s
-; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zfh,+zvfh | FileCheck --check-prefixes=CHECK,RV64 %s
+; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zfh,+zvfh | FileCheck %s
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zfh,+zvfh | FileCheck %s
 
 ; Integers
 
@@ -184,6 +184,3 @@ declare {<2 x float>, <2 x float>} @llvm.experimental.vector.deinterleave2.v4f32
 declare {<8 x half>, <8 x half>} @llvm.experimental.vector.deinterleave2.v16f16(<16 x half>)
 declare {<4 x float>, <4 x float>} @llvm.experimental.vector.deinterleave2.v8f32(<8 x float>)
 declare {<2 x double>, <2 x double>} @llvm.experimental.vector.deinterleave2.v4f64(<4 x double>)
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; RV32: {{.*}}
-; RV64: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulh-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmulh-sdnode.ll
index 69191ebb9256d..0fda7909df313 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulh-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulh-sdnode.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
 
 ; Test that the prepareSREMEqFold optimization doesn't crash on scalable
 ; vector types.
@@ -312,6 +312,3 @@ define @vmulh_vi_nxv8i32_1( %va) {
   %vf = trunc %ve to
   ret %vf
 }
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; RV32: {{.*}}
-; RV64: {{.*}}