From 858b465bbe8ff56676cb2b91c958b4c27ef06105 Mon Sep 17 00:00:00 2001
From: Luke Lau
Date: Tue, 19 Sep 2023 13:54:57 +0100
Subject: [PATCH 1/3] [RISCV] Add tests for VP strided stores with unit stride. NFC

---
 .../rvv/fixed-vectors-strided-vpstore.ll      | 85 +++++++++++++++-
 .../test/CodeGen/RISCV/rvv/strided-vpstore.ll | 97 +++++++++++++++++--
 2 files changed, 168 insertions(+), 14 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll
index 781be5f607da1..c8abb69837875 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll
@@ -84,6 +84,17 @@ define void @strided_vpstore_v8i8(<8 x i8> %val, ptr %ptr, i32 signext %stride,
   ret void
 }
 
+define void @strided_vpstore_v8i8_unit_stride(<8 x i8> %val, ptr %ptr, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpstore_v8i8_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 1
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    vsse8.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v8i8.p0.i32(<8 x i8> %val, ptr %ptr, i32 1, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
 declare void @llvm.experimental.vp.strided.store.v2i16.p0.i32(<2 x i16>, ptr, i32, <2 x i1>, i32)
 
 define void @strided_vpstore_v2i16(<2 x i16> %val, ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
@@ -120,6 +131,17 @@ define void @strided_vpstore_v8i16(<8 x i16> %val, ptr %ptr, i32 signext %stride
   ret void
 }
 
+define void @strided_vpstore_v8i16_unit_stride(<8 x i16> %val, ptr %ptr, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpstore_v8i16_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 2
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v8i16.p0.i32(<8 x i16> %val, ptr %ptr, i32 2, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
 declare void @llvm.experimental.vp.strided.store.v2i32.p0.i32(<2 x i32>, ptr, i32, <2 x i1>, i32)
 
 define void @strided_vpstore_v2i32(<2 x i32> %val, ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
@@ -144,6 +166,17 @@ define void @strided_vpstore_v4i32(<4 x i32> %val, ptr %ptr, i32 signext %stride
   ret void
 }
 
+define void @strided_vpstore_v4i32_unit_stride(<4 x i32> %val, ptr %ptr, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpstore_v4i32_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 4
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    vsse32.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v4i32.p0.i32(<4 x i32> %val, ptr %ptr, i32 4, <4 x i1> %m, i32 %evl)
+  ret void
+}
+
 declare void @llvm.experimental.vp.strided.store.v8i32.p0.i32(<8 x i32>, ptr, i32, <8 x i1>, i32)
 
 define void @strided_vpstore_v8i32(<8 x i32> %val, ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
@@ -168,6 +201,17 @@ define void @strided_vpstore_v2i64(<2 x i64> %val, ptr %ptr, i32 signext %stride
   ret void
 }
 
+define void @strided_vpstore_v2i64_unit_stride(<2 x i64> %val, ptr %ptr, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpstore_v2i64_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 8
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vsse64.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v2i64.p0.i32(<2 x i64> %val, ptr %ptr, i32 8, <2 x i1> %m, i32 %evl)
+  ret void
+}
+
 declare void @llvm.experimental.vp.strided.store.v4i64.p0.i32(<4 x i64>, ptr, i32, <4 x i1>, i32)
 
 define void @strided_vpstore_v4i64(<4 x i64> %val, ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
@@ -228,6 +272,17 @@ define void @strided_vpstore_v8f16(<8 x half> %val, ptr %ptr, i32 signext %strid
   ret void
 }
 
+define void @strided_vpstore_v8f16_unit_stride(<8 x half> %val, ptr %ptr, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpstore_v8f16_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 2
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v8f16.p0.i32(<8 x half> %val, ptr %ptr, i32 2, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
 declare void @llvm.experimental.vp.strided.store.v2f32.p0.i32(<2 x float>, ptr, i32, <2 x i1>, i32)
 
 define void @strided_vpstore_v2f32(<2 x float> %val, ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
@@ -252,6 +307,17 @@ define void @strided_vpstore_v4f32(<4 x float> %val, ptr %ptr, i32 signext %stri
   ret void
 }
 
+define void @strided_vpstore_v4f32_unit_stride(<4 x float> %val, ptr %ptr, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpstore_v4f32_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 4
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    vsse32.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v4f32.p0.i32(<4 x float> %val, ptr %ptr, i32 4, <4 x i1> %m, i32 %evl)
+  ret void
+}
+
 declare void @llvm.experimental.vp.strided.store.v8f32.p0.i32(<8 x float>, ptr, i32, <8 x i1>, i32)
 
 define void @strided_vpstore_v8f32(<8 x float> %val, ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
@@ -276,6 +342,17 @@ define void @strided_vpstore_v2f64(<2 x double> %val, ptr %ptr, i32 signext %str
   ret void
 }
 
+define void @strided_vpstore_v2f64_unit_stride(<2 x double> %val, ptr %ptr, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpstore_v2f64_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 8
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vsse64.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v2f64.p0.i32(<2 x double> %val, ptr %ptr, i32 8, <2 x i1> %m, i32 %evl)
+  ret void
+}
+
 declare void @llvm.experimental.vp.strided.store.v4f64.p0.i32(<4 x double>, ptr, i32, <4 x i1>, i32)
 
 define void @strided_vpstore_v4f64(<4 x double> %val, ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
@@ -343,10 +420,10 @@ define void @strided_store_v32f64(<32 x double> %v, ptr %ptr, i32 signext %strid
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a4, 16
 ; CHECK-NEXT:    mv a3, a2
-; CHECK-NEXT:    bltu a2, a4, .LBB27_2
+; CHECK-NEXT:    bltu a2, a4, .LBB34_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    li a3, 16
-; CHECK-NEXT:  .LBB27_2:
+; CHECK-NEXT:  .LBB34_2:
 ; CHECK-NEXT:    vsetvli zero, a3, e64, m8, ta, ma
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
 ; CHECK-NEXT:    mul a3, a3, a1
@@ -369,10 +446,10 @@ define void @strided_store_v32f64_allones_mask(<32 x double> %v, ptr %ptr, i32 s
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a4, 16
 ; CHECK-NEXT:    mv a3, a2
-; CHECK-NEXT:    bltu a2, a4, .LBB28_2
+; CHECK-NEXT:    bltu a2, a4, .LBB35_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    li a3, 16
-; CHECK-NEXT:  .LBB28_2:
+; CHECK-NEXT:  .LBB35_2:
 ; CHECK-NEXT:    vsetvli zero, a3, e64, m8, ta, ma
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1
 ; CHECK-NEXT:    mul a3, a3, a1
diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
index b7b9769e82d1b..2f5d033a8ed15 100644
--- a/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
@@ -96,6 +96,17 @@ define void @strided_vpstore_nxv8i8(<vscale x 8 x i8> %val, ptr %ptr, i32 signex
   ret void
 }
 
+define void @strided_vpstore_nxv8i8_unit_stride(<vscale x 8 x i8> %val, ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpstore_nxv8i8_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 1
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vsse8.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv8i8.p0.i32(<vscale x 8 x i8> %val, ptr %ptr, i32 1, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
 declare void @llvm.experimental.vp.strided.store.nxv1i16.p0.i32(<vscale x 1 x i16>, ptr, i32, <vscale x 1 x i1>, i32)
 
 define void @strided_vpstore_nxv1i16(<vscale x 1 x i16> %val, ptr %ptr, i32 signext %strided, <vscale x 1 x i1> %m, i32 zeroext %evl) {
@@ -132,6 +143,17 @@ define void @strided_vpstore_nxv4i16(<vscale x 4 x i16> %val, ptr %ptr, i32 sign
   ret void
 }
 
+define void @strided_vpstore_nxv4i16_unit_stride(<vscale x 4 x i16> %val, ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpstore_nxv4i16_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 2
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv4i16.p0.i32(<vscale x 4 x i16> %val, ptr %ptr, i32 2, <vscale x 4 x i1> %m, i32 %evl)
+  ret void
+}
+
 declare void @llvm.experimental.vp.strided.store.nxv8i16.p0.i32(<vscale x 8 x i16>, ptr, i32, <vscale x 8 x i1>, i32)
 
 define void @strided_vpstore_nxv8i16(<vscale x 8 x i16> %val, ptr %ptr, i32 signext %strided, <vscale x 8 x i1> %m, i32 zeroext %evl) {
@@ -180,6 +202,17 @@ define void @strided_vpstore_nxv4i32(<vscale x 4 x i32> %val, ptr %ptr, i32 sign
   ret void
 }
 
+define void @strided_vpstore_nxv4i32_unit_stride(<vscale x 4 x i32> %val, ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpstore_nxv4i32_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 4
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    vsse32.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv4i32.p0.i32(<vscale x 4 x i32> %val, ptr %ptr, i32 4, <vscale x 4 x i1> %m, i32 %evl)
+  ret void
+}
+
 declare void @llvm.experimental.vp.strided.store.nxv8i32.p0.i32(<vscale x 8 x i32>, ptr, i32, <vscale x 8 x i1>, i32)
 
 define void @strided_vpstore_nxv8i32(<vscale x 8 x i32> %val, ptr %ptr, i32 signext %strided, <vscale x 8 x i1> %m, i32 zeroext %evl) {
@@ -204,6 +237,17 @@ define void @strided_vpstore_nxv1i64(<vscale x 1 x i64> %val, ptr %ptr, i32 sign
   ret void
 }
 
+define void @strided_vpstore_nxv1i64_unit_stride(<vscale x 1 x i64> %val, ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpstore_nxv1i64_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 8
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vsse64.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv1i64.p0.i32(<vscale x 1 x i64> %val, ptr %ptr, i32 8, <vscale x 1 x i1> %m, i32 %evl)
+  ret void
+}
+
 declare void @llvm.experimental.vp.strided.store.nxv2i64.p0.i32(<vscale x 2 x i64>, ptr, i32, <vscale x 2 x i1>, i32)
 
 define void @strided_vpstore_nxv2i64(<vscale x 2 x i64> %val, ptr %ptr, i32 signext %strided, <vscale x 2 x i1> %m, i32 zeroext %evl) {
@@ -276,6 +320,17 @@ define void @strided_vpstore_nxv4f16(<vscale x 4 x half> %val, ptr %ptr, i32 sig
   ret void
 }
 
+define void @strided_vpstore_nxv4f16_unit_stride(<vscale x 4 x half> %val, ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpstore_nxv4f16_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 2
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv4f16.p0.i32(<vscale x 4 x half> %val, ptr %ptr, i32 2, <vscale x 4 x i1> %m, i32 %evl)
+  ret void
+}
+
 declare void @llvm.experimental.vp.strided.store.nxv8f16.p0.i32(<vscale x 8 x half>, ptr, i32, <vscale x 8 x i1>, i32)
 
 define void @strided_vpstore_nxv8f16(<vscale x 8 x half> %val, ptr %ptr, i32 signext %strided, <vscale x 8 x i1> %m, i32 zeroext %evl) {
@@ -324,6 +379,17 @@ define void @strided_vpstore_nxv4f32(<vscale x 4 x float> %val, ptr %ptr, i32 si
   ret void
 }
 
+define void @strided_vpstore_nxv4f32_unit_stride(<vscale x 4 x float> %val, ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpstore_nxv4f32_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 4
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    vsse32.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv4f32.p0.i32(<vscale x 4 x float> %val, ptr %ptr, i32 4, <vscale x 4 x i1> %m, i32 %evl)
+  ret void
+}
+
 declare void @llvm.experimental.vp.strided.store.nxv8f32.p0.i32(<vscale x 8 x float>, ptr, i32, <vscale x 8 x i1>, i32)
 
 define void @strided_vpstore_nxv8f32(<vscale x 8 x float> %val, ptr %ptr, i32 signext %strided, <vscale x 8 x i1> %m, i32 zeroext %evl) {
@@ -348,6 +414,17 @@ define void @strided_vpstore_nxv1f64(<vscale x 1 x double> %val, ptr %ptr, i32 s
   ret void
 }
 
+define void @strided_vpstore_nxv1f64_unit_stride(<vscale x 1 x double> %val, ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpstore_nxv1f64_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 8
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vsse64.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv1f64.p0.i32(<vscale x 1 x double> %val, ptr %ptr, i32 8, <vscale x 1 x i1> %m, i32 %evl)
+  ret void
+}
+
 declare void @llvm.experimental.vp.strided.store.nxv2f64.p0.i32(<vscale x 2 x double>, ptr, i32, <vscale x 2 x i1>, i32)
 
 define void @strided_vpstore_nxv2f64(<vscale x 2 x double> %val, ptr %ptr, i32 signext %strided, <vscale x 2 x i1> %m, i32 zeroext %evl) {
@@ -427,10 +504,10 @@ define void @strided_store_nxv16f64(<vscale x 16 x double> %v, ptr %ptr, i32 sig
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a3, vlenb
 ; CHECK-NEXT:    mv a4, a2
-; CHECK-NEXT:    bltu a2, a3, .LBB34_2
+; CHECK-NEXT:    bltu a2, a3, .LBB41_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a4, a3
-; CHECK-NEXT:  .LBB34_2:
+; CHECK-NEXT:  .LBB41_2:
 ; CHECK-NEXT:    vsetvli zero, a4, e64, m8, ta, ma
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
 ; CHECK-NEXT:    sub a5, a2, a3
@@ -454,10 +531,10 @@ define void @strided_store_nxv16f64_allones_mask(<vscale x 16 x double> %v, ptr
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a3, vlenb
 ; CHECK-NEXT:    mv a4, a2
-; CHECK-NEXT:    bltu a2, a3, .LBB35_2
+; CHECK-NEXT:    bltu a2, a3, .LBB42_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a4, a3
-; CHECK-NEXT:  .LBB35_2:
+; CHECK-NEXT:  .LBB42_2:
 ; CHECK-NEXT:    vsetvli zero, a4, e64, m8, ta, ma
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1
 ; CHECK-NEXT:    sub a3, a2, a3
@@ -485,15 +562,15 @@ define void @strided_store_nxv17f64(<vscale x 17 x double> %v, ptr %ptr, i32 sig
 ; CHECK-NEXT:    slli a6, a4, 1
 ; CHECK-NEXT:    vmv1r.v v24, v0
 ; CHECK-NEXT:    mv a5, a3
-; CHECK-NEXT:    bltu a3, a6, .LBB36_2
+; CHECK-NEXT:    bltu a3, a6, .LBB43_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a5, a6
-; CHECK-NEXT:  .LBB36_2:
+; CHECK-NEXT:  .LBB43_2:
 ; CHECK-NEXT:    mv a7, a5
-; CHECK-NEXT:    bltu a5, a4, .LBB36_4
+; CHECK-NEXT:    bltu a5, a4, .LBB43_4
 ; CHECK-NEXT:  # %bb.3:
 ; CHECK-NEXT:    mv a7, a4
-; CHECK-NEXT:  .LBB36_4:
+; CHECK-NEXT:  .LBB43_4:
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    csrr t0, vlenb
@@ -521,10 +598,10 @@ define void @strided_store_nxv17f64(<vscale x 17 x double> %v, ptr %ptr, i32 sig
 ; CHECK-NEXT:    addi a3, a3, -1
 ; CHECK-NEXT:    and a0, a3, a0
 ; CHECK-NEXT:    vsse64.v v16, (a7), a2, v0.t
-; CHECK-NEXT:    bltu a0, a4, .LBB36_6
+; CHECK-NEXT:    bltu a0, a4, .LBB43_6
 ; CHECK-NEXT:  # %bb.5:
 ; CHECK-NEXT:    mv a0, a4
-; CHECK-NEXT:  .LBB36_6:
+; CHECK-NEXT:  .LBB43_6:
 ; CHECK-NEXT:    mul a3, a5, a2
 ; CHECK-NEXT:    add a1, a1, a3
 ; CHECK-NEXT:    srli a4, a4, 2

From f2aa5012e640f88a83396efd281a434f75bd0564 Mon Sep 17 00:00:00 2001
From: Luke Lau
Date: Tue, 19 Sep 2023 14:05:21 +0100
Subject: [PATCH 2/3] [DAGCombiner] Combine vp.strided.store with unit stride to vp.store

This is the VP equivalent of #66677. If we have a strided store where the
stride is equal to the element width, we can just use a regular VP store.
---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 20 ++++++++++++++++++
 .../rvv/fixed-vectors-strided-vpstore.ll      | 21 +++++++------------
 .../test/CodeGen/RISCV/rvv/strided-vpstore.ll | 21 +++++++------------
 3 files changed, 34 insertions(+), 28 deletions(-)
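
For illustration (sketch only, using the types from the tests below): with
<4 x float> the element is 4 bytes wide, so a constant stride of 4 is a unit
stride. The combine itself runs on SelectionDAG nodes, not on IR, but
conceptually it rewrites a node that at the IR level looks like

  call void @llvm.experimental.vp.strided.store.v4f32.p0.i32(<4 x float> %val, ptr %ptr, i32 4, <4 x i1> %m, i32 %evl)

into the equivalent of

  call void @llvm.vp.store.v4f32.p0(<4 x float> %val, ptr %ptr, <4 x i1> %m, i32 %evl)

which lets the RISC-V backend select a unit-stride vse32.v instead of
materialising the stride in a register for vsse32.v.
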
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 20a89f24603d0..d246b8a0ed4a8 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -540,6 +540,7 @@ namespace {
     SDValue visitVPGATHER(SDNode *N);
     SDValue visitVPSCATTER(SDNode *N);
     SDValue visitVP_STRIDED_LOAD(SDNode *N);
+    SDValue visitVP_STRIDED_STORE(SDNode *N);
     SDValue visitFP_TO_FP16(SDNode *N);
     SDValue visitFP16_TO_FP(SDNode *N);
     SDValue visitFP_TO_BF16(SDNode *N);
@@ -11873,6 +11874,21 @@ SDValue DAGCombiner::visitMSTORE(SDNode *N) {
   return SDValue();
 }
 
+SDValue DAGCombiner::visitVP_STRIDED_STORE(SDNode *N) {
+  auto *SST = cast<VPStridedStoreSDNode>(N);
+  EVT EltVT = SST->getValue().getValueType().getVectorElementType();
+  // Combine strided loads with unit-stride to a regular load.
+  if (auto *CStride = dyn_cast<ConstantSDNode>(SST->getStride());
+      CStride && CStride->getZExtValue() == EltVT.getStoreSize()) {
+    return DAG.getStoreVP(SST->getChain(), SDLoc(N), SST->getValue(),
+                          SST->getBasePtr(), SST->getOffset(), SST->getMask(),
+                          SST->getVectorLength(), SST->getMemoryVT(),
+                          SST->getMemOperand(), SST->getAddressingMode(),
+                          SST->isTruncatingStore(), SST->isCompressingStore());
+  }
+  return SDValue();
+}
+
 SDValue DAGCombiner::visitVPGATHER(SDNode *N) {
   VPGatherSDNode *MGT = cast<VPGatherSDNode>(N);
   SDValue Mask = MGT->getMask();
@@ -25997,6 +26013,10 @@ SDValue DAGCombiner::visitVPOp(SDNode *N) {
     if (SDValue SD = visitVP_STRIDED_LOAD(N))
       return SD;
 
+  if (N->getOpcode() == ISD::EXPERIMENTAL_VP_STRIDED_STORE)
+    if (SDValue SD = visitVP_STRIDED_STORE(N))
+      return SD;
+
   // VP operations in which all vector elements are disabled - either by
   // determining that the mask is all false or that the EVL is 0 - can be
   // eliminated.
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll
index c8abb69837875..6c4960bd40784 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll
@@ -87,9 +87,8 @@ define void @strided_vpstore_v8i8(<8 x i8> %val, ptr %ptr, i32 signext %stride,
 define void @strided_vpstore_v8i8_unit_stride(<8 x i8> %val, ptr %ptr, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpstore_v8i8_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 1
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vsse8.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vse8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.v8i8.p0.i32(<8 x i8> %val, ptr %ptr, i32 1, <8 x i1> %m, i32 %evl)
   ret void
@@ -134,9 +133,8 @@ define void @strided_vpstore_v8i16(<8 x i16> %val, ptr %ptr, i32 signext %stride
 define void @strided_vpstore_v8i16_unit_stride(<8 x i16> %val, ptr %ptr, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpstore_v8i16_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 2
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vsse16.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vse16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.v8i16.p0.i32(<8 x i16> %val, ptr %ptr, i32 2, <8 x i1> %m, i32 %evl)
   ret void
@@ -169,9 +167,8 @@ define void @strided_vpstore_v4i32(<4 x i32> %val, ptr %ptr, i32 signext %stride
 define void @strided_vpstore_v4i32_unit_stride(<4 x i32> %val, ptr %ptr, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpstore_v4i32_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 4
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vsse32.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vse32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.v4i32.p0.i32(<4 x i32> %val, ptr %ptr, i32 4, <4 x i1> %m, i32 %evl)
   ret void
@@ -204,9 +201,8 @@ define void @strided_vpstore_v2i64(<2 x i64> %val, ptr %ptr, i32 signext %stride
 define void @strided_vpstore_v2i64_unit_stride(<2 x i64> %val, ptr %ptr, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpstore_v2i64_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vsse64.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vse64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.v2i64.p0.i32(<2 x i64> %val, ptr %ptr, i32 8, <2 x i1> %m, i32 %evl)
   ret void
@@ -275,9 +271,8 @@ define void @strided_vpstore_v8f16(<8 x half> %val, ptr %ptr, i32 signext %strid
 define void @strided_vpstore_v8f16_unit_stride(<8 x half> %val, ptr %ptr, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpstore_v8f16_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 2
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vsse16.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vse16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.v8f16.p0.i32(<8 x half> %val, ptr %ptr, i32 2, <8 x i1> %m, i32 %evl)
   ret void
@@ -310,9 +305,8 @@ define void @strided_vpstore_v4f32(<4 x float> %val, ptr %ptr, i32 signext %stri
 define void @strided_vpstore_v4f32_unit_stride(<4 x float> %val, ptr %ptr, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpstore_v4f32_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 4
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vsse32.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vse32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.v4f32.p0.i32(<4 x float> %val, ptr %ptr, i32 4, <4 x i1> %m, i32 %evl)
   ret void
@@ -345,9 +339,8 @@ define void @strided_vpstore_v2f64(<2 x double> %val, ptr %ptr, i32 signext %str
 define void @strided_vpstore_v2f64_unit_stride(<2 x double> %val, ptr %ptr, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpstore_v2f64_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vsse64.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vse64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.v2f64.p0.i32(<2 x double> %val, ptr %ptr, i32 8, <2 x i1> %m, i32 %evl)
   ret void
diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
index 2f5d033a8ed15..cf6ce89b9b5a4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
@@ -99,9 +99,8 @@ define void @strided_vpstore_nxv8i8(<vscale x 8 x i8> %val, ptr %ptr, i32 signex
 define void @strided_vpstore_nxv8i8_unit_stride(<vscale x 8 x i8> %val, ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpstore_nxv8i8_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 1
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vsse8.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vse8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.nxv8i8.p0.i32(<vscale x 8 x i8> %val, ptr %ptr, i32 1, <vscale x 8 x i1> %m, i32 %evl)
   ret void
@@ -146,9 +145,8 @@ define void @strided_vpstore_nxv4i16(<vscale x 4 x i16> %val, ptr %ptr, i32 sign
 define void @strided_vpstore_nxv4i16_unit_stride(<vscale x 4 x i16> %val, ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpstore_nxv4i16_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 2
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vsse16.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vse16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.nxv4i16.p0.i32(<vscale x 4 x i16> %val, ptr %ptr, i32 2, <vscale x 4 x i1> %m, i32 %evl)
   ret void
@@ -205,9 +203,8 @@ define void @strided_vpstore_nxv4i32(<vscale x 4 x i32> %val, ptr %ptr, i32 sign
 define void @strided_vpstore_nxv4i32_unit_stride(<vscale x 4 x i32> %val, ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpstore_nxv4i32_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 4
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vsse32.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vse32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.nxv4i32.p0.i32(<vscale x 4 x i32> %val, ptr %ptr, i32 4, <vscale x 4 x i1> %m, i32 %evl)
   ret void
@@ -240,9 +237,8 @@ define void @strided_vpstore_nxv1i64(<vscale x 1 x i64> %val, ptr %ptr, i32 sign
 define void @strided_vpstore_nxv1i64_unit_stride(<vscale x 1 x i64> %val, ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpstore_nxv1i64_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vsse64.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vse64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.nxv1i64.p0.i32(<vscale x 1 x i64> %val, ptr %ptr, i32 8, <vscale x 1 x i1> %m, i32 %evl)
   ret void
@@ -323,9 +319,8 @@ define void @strided_vpstore_nxv4f16(<vscale x 4 x half> %val, ptr %ptr, i32 sig
 define void @strided_vpstore_nxv4f16_unit_stride(<vscale x 4 x half> %val, ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpstore_nxv4f16_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 2
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vsse16.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vse16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.nxv4f16.p0.i32(<vscale x 4 x half> %val, ptr %ptr, i32 2, <vscale x 4 x i1> %m, i32 %evl)
   ret void
@@ -382,9 +377,8 @@ define void @strided_vpstore_nxv4f32(<vscale x 4 x float> %val, ptr %ptr, i32 si
 define void @strided_vpstore_nxv4f32_unit_stride(<vscale x 4 x float> %val, ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpstore_nxv4f32_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 4
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vsse32.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vse32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.nxv4f32.p0.i32(<vscale x 4 x float> %val, ptr %ptr, i32 4, <vscale x 4 x i1> %m, i32 %evl)
   ret void
@@ -417,9 +411,8 @@ define void @strided_vpstore_nxv1f64(<vscale x 1 x double> %val, ptr %ptr, i32 s
 define void @strided_vpstore_nxv1f64_unit_stride(<vscale x 1 x double> %val, ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpstore_nxv1f64_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vsse64.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vse64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.nxv1f64.p0.i32(<vscale x 1 x double> %val, ptr %ptr, i32 8, <vscale x 1 x i1> %m, i32 %evl)
   ret void

From 703068c4426b6e8686b05f1bb87387678da3f922 Mon Sep 17 00:00:00 2001
From: Luke Lau
Date: Tue, 19 Sep 2023 16:42:18 +0100
Subject: [PATCH 3/3] Fix comment

---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index d246b8a0ed4a8..db1ebe0e26b9a 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -11877,7 +11877,7 @@ SDValue DAGCombiner::visitMSTORE(SDNode *N) {
 SDValue DAGCombiner::visitVP_STRIDED_STORE(SDNode *N) {
   auto *SST = cast<VPStridedStoreSDNode>(N);
   EVT EltVT = SST->getValue().getValueType().getVectorElementType();
-  // Combine strided loads with unit-stride to a regular load.
+  // Combine strided stores with unit-stride to a regular VP store.
   if (auto *CStride = dyn_cast<ConstantSDNode>(SST->getStride());
       CStride && CStride->getZExtValue() == EltVT.getStoreSize()) {
     return DAG.getStoreVP(SST->getChain(), SDLoc(N), SST->getValue(),