diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp index 8062bc13f9a93..f042b49b22306 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp @@ -224,7 +224,14 @@ void AMDGPUAtomicOptimizerImpl::visitAtomicRMWInst(AtomicRMWInst &I) { return; } - const bool ValDivergent = UA->isDivergentUse(I.getOperandUse(ValIdx)); + bool ValDivergent = UA->isDivergentUse(I.getOperandUse(ValIdx)); + + if ((Op == AtomicRMWInst::FAdd || Op == AtomicRMWInst::FSub) && + !I.use_empty()) { + // Disable the uniform return value calculation using fmul because it + // mishandles infinities, NaNs and signed zeros. FIXME. + ValDivergent = true; + } // If the value operand is divergent, each lane is contributing a different // value to the atomic calculation. We can only optimize divergent values if @@ -985,6 +992,12 @@ void AMDGPUAtomicOptimizerImpl::optimizeAtomic(Instruction &I, break; case AtomicRMWInst::FAdd: case AtomicRMWInst::FSub: { + // FIXME: This path is currently disabled in visitAtomicRMWInst because + // of problems calculating the first active lane of the result (where + // Mbcnt is 0): + // - If V is infinity or NaN we will return NaN instead of BroadcastI. + // - If BroadcastI is -0.0 and V is positive we will return +0.0 instead + // of -0.0. LaneOffset = B.CreateFMul(V, Mbcnt); break; } diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.f32-rtn.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.f32-rtn.ll index e48d281f37c9a..077aff46839a6 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.f32-rtn.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.f32-rtn.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py -; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx90a -verify-machineinstrs -stop-after=instruction-select -amdgpu-atomic-optimizer-strategy=DPP < %s | FileCheck -check-prefix=GFX90A_GFX940 %s -; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx940 -verify-machineinstrs -stop-after=instruction-select -amdgpu-atomic-optimizer-strategy=DPP < %s | FileCheck -check-prefix=GFX90A_GFX940 %s +; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx90a -verify-machineinstrs -stop-after=instruction-select -amdgpu-atomic-optimizer-strategy=DPP < %s | FileCheck -check-prefixes=GFX90A_GFX940,GFX90A %s +; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx940 -verify-machineinstrs -stop-after=instruction-select -amdgpu-atomic-optimizer-strategy=DPP < %s | FileCheck -check-prefixes=GFX90A_GFX940,GFX940 %s ; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs -stop-after=instruction-select -amdgpu-atomic-optimizer-strategy=DPP < %s | FileCheck -check-prefix=GFX11 %s ; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -stop-after=instruction-select -amdgpu-atomic-optimizer-strategy=DPP < %s | FileCheck -check-prefix=GFX11 %s @@ -149,6 +149,190 @@ define amdgpu_ps float @global_atomic_fadd_f32_rtn_atomicrmw(ptr addrspace(1) %p } define amdgpu_ps float @global_atomic_fadd_f32_saddr_rtn_atomicrmw(ptr addrspace(1) inreg %ptr, float %data) #0 { + ; GFX90A-LABEL: name: global_atomic_fadd_f32_saddr_rtn_atomicrmw + ; GFX90A: bb.1 (%ir-block.0): + ; GFX90A-NEXT: successors: %bb.2(0x40000000), %bb.4(0x40000000) + ; GFX90A-NEXT: liveins: $sgpr0, $sgpr1, $vgpr0 + ; GFX90A-NEXT: {{ $}} + ; GFX90A-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX90A-NEXT: 
[[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX90A-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX90A-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX90A-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF + ; GFX90A-NEXT: [[SI_PS_LIVE:%[0-9]+]]:sreg_64_xexec = SI_PS_LIVE + ; GFX90A-NEXT: [[SI_IF:%[0-9]+]]:sreg_64_xexec = SI_IF [[SI_PS_LIVE]], %bb.4, implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX90A-NEXT: S_BRANCH %bb.2 + ; GFX90A-NEXT: {{ $}} + ; GFX90A-NEXT: bb.2 (%ir-block.5): + ; GFX90A-NEXT: successors: %bb.3(0x40000000), %bb.5(0x40000000) + ; GFX90A-NEXT: {{ $}} + ; GFX90A-NEXT: [[COPY3:%[0-9]+]]:sreg_64 = COPY $exec + ; GFX90A-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY [[COPY3]].sub0 + ; GFX90A-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY [[COPY3]].sub0 + ; GFX90A-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY [[COPY3]].sub1 + ; GFX90A-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX90A-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[S_MOV_B32_]], %subreg.sub1 + ; GFX90A-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE1]].sub0 + ; GFX90A-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX90A-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY4]] + ; GFX90A-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_1]] + ; GFX90A-NEXT: [[V_MBCNT_LO_U32_B32_e64_:%[0-9]+]]:vgpr_32 = V_MBCNT_LO_U32_B32_e64 [[COPY8]], [[COPY9]], implicit $exec + ; GFX90A-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY7]] + ; GFX90A-NEXT: [[V_MBCNT_HI_U32_B32_e64_:%[0-9]+]]:vgpr_32 = V_MBCNT_HI_U32_B32_e64 [[COPY10]], [[V_MBCNT_LO_U32_B32_e64_]], implicit $exec + ; GFX90A-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 -2147483648 + ; GFX90A-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_2]] + ; GFX90A-NEXT: [[V_SET_INACTIVE_B32_:%[0-9]+]]:vgpr_32 = V_SET_INACTIVE_B32 [[COPY2]], [[COPY11]], implicit-def dead $scc, implicit $exec + ; GFX90A-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 2147483648 + ; GFX90A-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_3]] + ; GFX90A-NEXT: [[V_MOV_B32_dpp:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY12]], [[V_SET_INACTIVE_B32_]], 273, 15, 15, 0, implicit $exec + ; GFX90A-NEXT: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_SET_INACTIVE_B32_]], 0, [[V_MOV_B32_dpp]], 0, 0, implicit $mode, implicit $exec + ; GFX90A-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_3]] + ; GFX90A-NEXT: [[V_MOV_B32_dpp1:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY13]], [[V_ADD_F32_e64_]], 274, 15, 15, 0, implicit $exec + ; GFX90A-NEXT: [[V_ADD_F32_e64_1:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_]], 0, [[V_MOV_B32_dpp1]], 0, 0, implicit $mode, implicit $exec + ; GFX90A-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_3]] + ; GFX90A-NEXT: [[V_MOV_B32_dpp2:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY14]], [[V_ADD_F32_e64_1]], 276, 15, 15, 0, implicit $exec + ; GFX90A-NEXT: [[V_ADD_F32_e64_2:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_1]], 0, [[V_MOV_B32_dpp2]], 0, 0, implicit $mode, implicit $exec + ; GFX90A-NEXT: [[COPY15:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_3]] + ; GFX90A-NEXT: [[V_MOV_B32_dpp3:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY15]], [[V_ADD_F32_e64_2]], 280, 15, 15, 0, implicit $exec + ; GFX90A-NEXT: [[V_ADD_F32_e64_3:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_2]], 0, [[V_MOV_B32_dpp3]], 0, 0, implicit $mode, implicit $exec + ; GFX90A-NEXT: [[COPY16:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_3]] + ; 
GFX90A-NEXT: [[V_MOV_B32_dpp4:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY16]], [[V_ADD_F32_e64_3]], 322, 10, 15, 0, implicit $exec + ; GFX90A-NEXT: [[V_ADD_F32_e64_4:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_3]], 0, [[V_MOV_B32_dpp4]], 0, 0, implicit $mode, implicit $exec + ; GFX90A-NEXT: [[COPY17:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_3]] + ; GFX90A-NEXT: [[V_MOV_B32_dpp5:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY17]], [[V_ADD_F32_e64_4]], 323, 12, 15, 0, implicit $exec + ; GFX90A-NEXT: [[V_ADD_F32_e64_5:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_4]], 0, [[V_MOV_B32_dpp5]], 0, 0, implicit $mode, implicit $exec + ; GFX90A-NEXT: [[COPY18:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_3]] + ; GFX90A-NEXT: [[V_MOV_B32_dpp6:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY18]], [[V_ADD_F32_e64_5]], 312, 15, 15, 0, implicit $exec + ; GFX90A-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 63 + ; GFX90A-NEXT: [[V_READLANE_B32_:%[0-9]+]]:sreg_32 = V_READLANE_B32 [[V_ADD_F32_e64_5]], [[S_MOV_B32_4]] + ; GFX90A-NEXT: [[COPY19:%[0-9]+]]:vgpr_32 = COPY [[V_READLANE_B32_]] + ; GFX90A-NEXT: [[STRICT_WWM:%[0-9]+]]:vgpr_32 = STRICT_WWM [[COPY19]], implicit $exec + ; GFX90A-NEXT: [[COPY20:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_1]] + ; GFX90A-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U32_e64 [[V_MBCNT_HI_U32_B32_e64_]], [[COPY20]], implicit $exec + ; GFX90A-NEXT: [[SI_IF1:%[0-9]+]]:sreg_64_xexec = SI_IF [[V_CMP_EQ_U32_e64_]], %bb.5, implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX90A-NEXT: S_BRANCH %bb.3 + ; GFX90A-NEXT: {{ $}} + ; GFX90A-NEXT: bb.3 (%ir-block.36): + ; GFX90A-NEXT: successors: %bb.5(0x80000000) + ; GFX90A-NEXT: {{ $}} + ; GFX90A-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + ; GFX90A-NEXT: [[GLOBAL_ATOMIC_ADD_F32_SADDR_RTN:%[0-9]+]]:vgpr_32 = GLOBAL_ATOMIC_ADD_F32_SADDR_RTN [[V_MOV_B32_e32_]], [[STRICT_WWM]], [[REG_SEQUENCE]], 0, 1, implicit $exec :: (load store syncscope("wavefront") monotonic (s32) on %ir.ptr, addrspace 1) + ; GFX90A-NEXT: S_BRANCH %bb.5 + ; GFX90A-NEXT: {{ $}} + ; GFX90A-NEXT: bb.4.Flow: + ; GFX90A-NEXT: successors: %bb.6(0x80000000) + ; GFX90A-NEXT: {{ $}} + ; GFX90A-NEXT: [[PHI:%[0-9]+]]:vgpr_32 = PHI %43, %bb.5, [[DEF]], %bb.1 + ; GFX90A-NEXT: SI_END_CF [[SI_IF]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX90A-NEXT: S_BRANCH %bb.6 + ; GFX90A-NEXT: {{ $}} + ; GFX90A-NEXT: bb.5 (%ir-block.39): + ; GFX90A-NEXT: successors: %bb.4(0x80000000) + ; GFX90A-NEXT: {{ $}} + ; GFX90A-NEXT: [[PHI1:%[0-9]+]]:vgpr_32 = PHI [[GLOBAL_ATOMIC_ADD_F32_SADDR_RTN]], %bb.3, [[DEF]], %bb.2 + ; GFX90A-NEXT: SI_END_CF [[SI_IF1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX90A-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32 = V_READFIRSTLANE_B32 [[PHI1]], implicit $exec + ; GFX90A-NEXT: [[STRICT_WWM1:%[0-9]+]]:vgpr_32 = STRICT_WWM [[V_MOV_B32_dpp6]], implicit $exec + ; GFX90A-NEXT: [[COPY21:%[0-9]+]]:vgpr_32 = COPY [[V_READFIRSTLANE_B32_]] + ; GFX90A-NEXT: [[V_ADD_F32_e64_6:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[COPY21]], 0, [[STRICT_WWM1]], 0, 0, implicit $mode, implicit $exec + ; GFX90A-NEXT: S_BRANCH %bb.4 + ; GFX90A-NEXT: {{ $}} + ; GFX90A-NEXT: bb.6 (%ir-block.46): + ; GFX90A-NEXT: $vgpr0 = COPY [[PHI]] + ; GFX90A-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX940-LABEL: name: global_atomic_fadd_f32_saddr_rtn_atomicrmw + ; GFX940: bb.1 (%ir-block.0): + ; GFX940-NEXT: successors: %bb.2(0x40000000), %bb.4(0x40000000) + ; GFX940-NEXT: liveins: $sgpr0, $sgpr1, 
$vgpr0 + ; GFX940-NEXT: {{ $}} + ; GFX940-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX940-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX940-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX940-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX940-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF + ; GFX940-NEXT: [[SI_PS_LIVE:%[0-9]+]]:sreg_64_xexec = SI_PS_LIVE + ; GFX940-NEXT: [[SI_IF:%[0-9]+]]:sreg_64_xexec = SI_IF [[SI_PS_LIVE]], %bb.4, implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX940-NEXT: S_BRANCH %bb.2 + ; GFX940-NEXT: {{ $}} + ; GFX940-NEXT: bb.2 (%ir-block.5): + ; GFX940-NEXT: successors: %bb.3(0x40000000), %bb.5(0x40000000) + ; GFX940-NEXT: {{ $}} + ; GFX940-NEXT: [[COPY3:%[0-9]+]]:sreg_64 = COPY $exec + ; GFX940-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY [[COPY3]].sub0 + ; GFX940-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY [[COPY3]].sub0 + ; GFX940-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY [[COPY3]].sub1 + ; GFX940-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX940-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[S_MOV_B32_]], %subreg.sub1 + ; GFX940-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE1]].sub0 + ; GFX940-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX940-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY4]] + ; GFX940-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_1]] + ; GFX940-NEXT: [[V_MBCNT_LO_U32_B32_e64_:%[0-9]+]]:vgpr_32 = V_MBCNT_LO_U32_B32_e64 [[COPY8]], [[COPY9]], implicit $exec + ; GFX940-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY7]] + ; GFX940-NEXT: [[V_MBCNT_HI_U32_B32_e64_:%[0-9]+]]:vgpr_32 = V_MBCNT_HI_U32_B32_e64 [[COPY10]], [[V_MBCNT_LO_U32_B32_e64_]], implicit $exec + ; GFX940-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 -2147483648 + ; GFX940-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_2]] + ; GFX940-NEXT: [[V_SET_INACTIVE_B32_:%[0-9]+]]:vgpr_32 = V_SET_INACTIVE_B32 [[COPY2]], [[COPY11]], implicit-def dead $scc, implicit $exec + ; GFX940-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 2147483648 + ; GFX940-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_3]] + ; GFX940-NEXT: [[V_MOV_B32_dpp:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY12]], [[V_SET_INACTIVE_B32_]], 273, 15, 15, 0, implicit $exec + ; GFX940-NEXT: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_SET_INACTIVE_B32_]], 0, [[V_MOV_B32_dpp]], 0, 0, implicit $mode, implicit $exec + ; GFX940-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_3]] + ; GFX940-NEXT: [[V_MOV_B32_dpp1:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY13]], [[V_ADD_F32_e64_]], 274, 15, 15, 0, implicit $exec + ; GFX940-NEXT: [[V_ADD_F32_e64_1:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_]], 0, [[V_MOV_B32_dpp1]], 0, 0, implicit $mode, implicit $exec + ; GFX940-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_3]] + ; GFX940-NEXT: [[V_MOV_B32_dpp2:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY14]], [[V_ADD_F32_e64_1]], 276, 15, 15, 0, implicit $exec + ; GFX940-NEXT: [[V_ADD_F32_e64_2:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_1]], 0, [[V_MOV_B32_dpp2]], 0, 0, implicit $mode, implicit $exec + ; GFX940-NEXT: [[COPY15:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_3]] + ; GFX940-NEXT: [[V_MOV_B32_dpp3:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY15]], [[V_ADD_F32_e64_2]], 280, 15, 15, 0, implicit $exec + ; GFX940-NEXT: [[V_ADD_F32_e64_3:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_2]], 0, [[V_MOV_B32_dpp3]], 0, 0, 
implicit $mode, implicit $exec + ; GFX940-NEXT: [[COPY16:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_3]] + ; GFX940-NEXT: [[V_MOV_B32_dpp4:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY16]], [[V_ADD_F32_e64_3]], 322, 10, 15, 0, implicit $exec + ; GFX940-NEXT: [[V_ADD_F32_e64_4:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_3]], 0, [[V_MOV_B32_dpp4]], 0, 0, implicit $mode, implicit $exec + ; GFX940-NEXT: [[COPY17:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_3]] + ; GFX940-NEXT: [[V_MOV_B32_dpp5:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY17]], [[V_ADD_F32_e64_4]], 323, 12, 15, 0, implicit $exec + ; GFX940-NEXT: [[V_ADD_F32_e64_5:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_4]], 0, [[V_MOV_B32_dpp5]], 0, 0, implicit $mode, implicit $exec + ; GFX940-NEXT: [[COPY18:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_3]] + ; GFX940-NEXT: [[V_MOV_B32_dpp6:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY18]], [[V_ADD_F32_e64_5]], 312, 15, 15, 0, implicit $exec + ; GFX940-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 63 + ; GFX940-NEXT: [[V_READLANE_B32_:%[0-9]+]]:sreg_32 = V_READLANE_B32 [[V_ADD_F32_e64_5]], [[S_MOV_B32_4]] + ; GFX940-NEXT: [[COPY19:%[0-9]+]]:vgpr_32 = COPY [[V_READLANE_B32_]] + ; GFX940-NEXT: [[STRICT_WWM:%[0-9]+]]:vgpr_32 = STRICT_WWM [[COPY19]], implicit $exec + ; GFX940-NEXT: [[COPY20:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_1]] + ; GFX940-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U32_e64 [[V_MBCNT_HI_U32_B32_e64_]], [[COPY20]], implicit $exec + ; GFX940-NEXT: [[SI_IF1:%[0-9]+]]:sreg_64_xexec = SI_IF [[V_CMP_EQ_U32_e64_]], %bb.5, implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX940-NEXT: S_BRANCH %bb.3 + ; GFX940-NEXT: {{ $}} + ; GFX940-NEXT: bb.3 (%ir-block.36): + ; GFX940-NEXT: successors: %bb.5(0x80000000) + ; GFX940-NEXT: {{ $}} + ; GFX940-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + ; GFX940-NEXT: [[GLOBAL_ATOMIC_ADD_F32_SADDR_RTN:%[0-9]+]]:vgpr_32 = GLOBAL_ATOMIC_ADD_F32_SADDR_RTN [[V_MOV_B32_e32_]], [[STRICT_WWM]], [[REG_SEQUENCE]], 0, 1, implicit $exec :: (load store syncscope("wavefront") monotonic (s32) on %ir.ptr, addrspace 1) + ; GFX940-NEXT: S_BRANCH %bb.5 + ; GFX940-NEXT: {{ $}} + ; GFX940-NEXT: bb.4.Flow: + ; GFX940-NEXT: successors: %bb.6(0x80000000) + ; GFX940-NEXT: {{ $}} + ; GFX940-NEXT: [[PHI:%[0-9]+]]:vgpr_32 = PHI %42, %bb.5, [[DEF]], %bb.1 + ; GFX940-NEXT: SI_END_CF [[SI_IF]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX940-NEXT: S_BRANCH %bb.6 + ; GFX940-NEXT: {{ $}} + ; GFX940-NEXT: bb.5 (%ir-block.39): + ; GFX940-NEXT: successors: %bb.4(0x80000000) + ; GFX940-NEXT: {{ $}} + ; GFX940-NEXT: [[PHI1:%[0-9]+]]:vgpr_32 = PHI [[GLOBAL_ATOMIC_ADD_F32_SADDR_RTN]], %bb.3, [[DEF]], %bb.2 + ; GFX940-NEXT: SI_END_CF [[SI_IF1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX940-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32 = V_READFIRSTLANE_B32 [[PHI1]], implicit $exec + ; GFX940-NEXT: [[STRICT_WWM1:%[0-9]+]]:vgpr_32 = STRICT_WWM [[V_MOV_B32_dpp6]], implicit $exec + ; GFX940-NEXT: [[COPY21:%[0-9]+]]:vgpr_32 = COPY [[V_READFIRSTLANE_B32_]] + ; GFX940-NEXT: [[V_ADD_F32_e64_6:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[COPY21]], 0, [[STRICT_WWM1]], 0, 0, implicit $mode, implicit $exec + ; GFX940-NEXT: S_BRANCH %bb.4 + ; GFX940-NEXT: {{ $}} + ; GFX940-NEXT: bb.6 (%ir-block.46): + ; GFX940-NEXT: $vgpr0 = COPY [[PHI]] + ; GFX940-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; ; GFX11-LABEL: name: global_atomic_fadd_f32_saddr_rtn_atomicrmw ; GFX11: bb.1 (%ir-block.0): ; GFX11-NEXT: 
successors: %bb.2(0x40000000), %bb.4(0x40000000) diff --git a/llvm/test/CodeGen/AMDGPU/global-atomic-fadd.f32-rtn.ll b/llvm/test/CodeGen/AMDGPU/global-atomic-fadd.f32-rtn.ll index 3454e9d1019e5..d4dee983d4fc0 100644 --- a/llvm/test/CodeGen/AMDGPU/global-atomic-fadd.f32-rtn.ll +++ b/llvm/test/CodeGen/AMDGPU/global-atomic-fadd.f32-rtn.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py -; RUN: llc -mtriple=amdgcn -mcpu=gfx90a -verify-machineinstrs -stop-after=amdgpu-isel -amdgpu-atomic-optimizer-strategy=DPP < %s | FileCheck -check-prefix=GFX90A_GFX940 %s -; RUN: llc -mtriple=amdgcn -mcpu=gfx940 -verify-machineinstrs -stop-after=amdgpu-isel -amdgpu-atomic-optimizer-strategy=DPP < %s | FileCheck -check-prefix=GFX90A_GFX940 %s +; RUN: llc -mtriple=amdgcn -mcpu=gfx90a -verify-machineinstrs -stop-after=amdgpu-isel -amdgpu-atomic-optimizer-strategy=DPP < %s | FileCheck -check-prefixes=GFX90A_GFX940,GFX90A %s +; RUN: llc -mtriple=amdgcn -mcpu=gfx940 -verify-machineinstrs -stop-after=amdgpu-isel -amdgpu-atomic-optimizer-strategy=DPP < %s | FileCheck -check-prefixes=GFX90A_GFX940,GFX940 %s ; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs -stop-after=amdgpu-isel -amdgpu-atomic-optimizer-strategy=DPP < %s | FileCheck -check-prefix=GFX11 %s ; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -stop-after=amdgpu-isel -amdgpu-atomic-optimizer-strategy=DPP < %s | FileCheck -check-prefix=GFX11 %s @@ -155,6 +155,162 @@ define amdgpu_ps float @global_atomic_fadd_f32_rtn_atomicrmw(ptr addrspace(1) %p } define amdgpu_ps float @global_atomic_fadd_f32_saddr_rtn_atomicrmw(ptr addrspace(1) inreg %ptr, float %data) #0 { + ; GFX90A-LABEL: name: global_atomic_fadd_f32_saddr_rtn_atomicrmw + ; GFX90A: bb.0 (%ir-block.0): + ; GFX90A-NEXT: successors: %bb.1(0x40000000), %bb.3(0x40000000) + ; GFX90A-NEXT: liveins: $sgpr0, $sgpr1, $vgpr0 + ; GFX90A-NEXT: {{ $}} + ; GFX90A-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX90A-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1 + ; GFX90A-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr0 + ; GFX90A-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX90A-NEXT: [[COPY3:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE]] + ; GFX90A-NEXT: [[SI_PS_LIVE:%[0-9]+]]:sreg_64 = SI_PS_LIVE + ; GFX90A-NEXT: [[DEF:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF + ; GFX90A-NEXT: [[SI_IF:%[0-9]+]]:sreg_64 = SI_IF killed [[SI_PS_LIVE]], %bb.3, implicit-def dead $exec, implicit-def dead $scc, implicit $exec + ; GFX90A-NEXT: S_BRANCH %bb.1 + ; GFX90A-NEXT: {{ $}} + ; GFX90A-NEXT: bb.1 (%ir-block.5): + ; GFX90A-NEXT: successors: %bb.2(0x40000000), %bb.4(0x40000000) + ; GFX90A-NEXT: {{ $}} + ; GFX90A-NEXT: [[COPY4:%[0-9]+]]:sreg_64 = COPY $exec + ; GFX90A-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY [[COPY4]].sub1 + ; GFX90A-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY [[COPY4]].sub0 + ; GFX90A-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX90A-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX90A-NEXT: [[V_MBCNT_LO_U32_B32_e64_:%[0-9]+]]:vgpr_32 = V_MBCNT_LO_U32_B32_e64 killed [[COPY6]], [[COPY7]], implicit $exec + ; GFX90A-NEXT: [[V_MBCNT_HI_U32_B32_e64_:%[0-9]+]]:vgpr_32 = V_MBCNT_HI_U32_B32_e64 killed [[COPY5]], killed [[V_MBCNT_LO_U32_B32_e64_]], implicit $exec + ; GFX90A-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 -2147483648 + ; GFX90A-NEXT: [[V_SET_INACTIVE_B32_:%[0-9]+]]:vgpr_32 = V_SET_INACTIVE_B32 [[COPY]], killed [[S_MOV_B32_1]], implicit-def dead $scc, implicit $exec + ; 
GFX90A-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 -2147483648, implicit $exec + ; GFX90A-NEXT: [[V_MOV_B32_dpp:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_SET_INACTIVE_B32_]], 273, 15, 15, 0, implicit $exec + ; GFX90A-NEXT: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_SET_INACTIVE_B32_]], 0, killed [[V_MOV_B32_dpp]], 0, 0, implicit $mode, implicit $exec + ; GFX90A-NEXT: [[V_MOV_B32_dpp1:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_ADD_F32_e64_]], 274, 15, 15, 0, implicit $exec + ; GFX90A-NEXT: [[V_ADD_F32_e64_1:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_]], 0, killed [[V_MOV_B32_dpp1]], 0, 0, implicit $mode, implicit $exec + ; GFX90A-NEXT: [[V_MOV_B32_dpp2:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_ADD_F32_e64_1]], 276, 15, 15, 0, implicit $exec + ; GFX90A-NEXT: [[V_ADD_F32_e64_2:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_1]], 0, killed [[V_MOV_B32_dpp2]], 0, 0, implicit $mode, implicit $exec + ; GFX90A-NEXT: [[V_MOV_B32_dpp3:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_ADD_F32_e64_2]], 280, 15, 15, 0, implicit $exec + ; GFX90A-NEXT: [[V_ADD_F32_e64_3:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_2]], 0, killed [[V_MOV_B32_dpp3]], 0, 0, implicit $mode, implicit $exec + ; GFX90A-NEXT: [[V_MOV_B32_dpp4:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_ADD_F32_e64_3]], 322, 10, 15, 0, implicit $exec + ; GFX90A-NEXT: [[V_ADD_F32_e64_4:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_3]], 0, killed [[V_MOV_B32_dpp4]], 0, 0, implicit $mode, implicit $exec + ; GFX90A-NEXT: [[V_MOV_B32_dpp5:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_ADD_F32_e64_4]], 323, 12, 15, 0, implicit $exec + ; GFX90A-NEXT: [[V_ADD_F32_e64_5:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_4]], 0, killed [[V_MOV_B32_dpp5]], 0, 0, implicit $mode, implicit $exec + ; GFX90A-NEXT: [[V_MOV_B32_dpp6:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_ADD_F32_e64_5]], 312, 15, 15, 0, implicit $exec + ; GFX90A-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 63 + ; GFX90A-NEXT: [[V_READLANE_B32_:%[0-9]+]]:sreg_32 = V_READLANE_B32 [[V_ADD_F32_e64_5]], killed [[S_MOV_B32_2]] + ; GFX90A-NEXT: early-clobber %2:sgpr_32 = STRICT_WWM killed [[V_READLANE_B32_]], implicit $exec + ; GFX90A-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_64 = V_CMP_EQ_U32_e64 killed [[V_MBCNT_HI_U32_B32_e64_]], [[S_MOV_B32_]], implicit $exec + ; GFX90A-NEXT: [[DEF1:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF + ; GFX90A-NEXT: [[SI_IF1:%[0-9]+]]:sreg_64 = SI_IF killed [[V_CMP_EQ_U32_e64_]], %bb.4, implicit-def dead $exec, implicit-def dead $scc, implicit $exec + ; GFX90A-NEXT: S_BRANCH %bb.2 + ; GFX90A-NEXT: {{ $}} + ; GFX90A-NEXT: bb.2 (%ir-block.36): + ; GFX90A-NEXT: successors: %bb.4(0x80000000) + ; GFX90A-NEXT: {{ $}} + ; GFX90A-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + ; GFX90A-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY %2 + ; GFX90A-NEXT: [[GLOBAL_ATOMIC_ADD_F32_SADDR_RTN:%[0-9]+]]:vgpr_32 = GLOBAL_ATOMIC_ADD_F32_SADDR_RTN killed [[V_MOV_B32_e32_1]], [[COPY8]], [[COPY3]], 0, 1, implicit $exec :: (load store syncscope("wavefront") monotonic (s32) on %ir.ptr, addrspace 1) + ; GFX90A-NEXT: S_BRANCH %bb.4 + ; GFX90A-NEXT: {{ $}} + ; GFX90A-NEXT: bb.3.Flow: + ; GFX90A-NEXT: successors: %bb.5(0x80000000) + ; GFX90A-NEXT: {{ $}} + ; GFX90A-NEXT: [[PHI:%[0-9]+]]:vgpr_32 = PHI [[DEF]], %bb.0, %7, %bb.4 + ; GFX90A-NEXT: SI_END_CF [[SI_IF]], 
implicit-def dead $exec, implicit-def dead $scc, implicit $exec + ; GFX90A-NEXT: S_BRANCH %bb.5 + ; GFX90A-NEXT: {{ $}} + ; GFX90A-NEXT: bb.4 (%ir-block.39): + ; GFX90A-NEXT: successors: %bb.3(0x80000000) + ; GFX90A-NEXT: {{ $}} + ; GFX90A-NEXT: [[PHI1:%[0-9]+]]:vgpr_32 = PHI [[DEF1]], %bb.1, [[GLOBAL_ATOMIC_ADD_F32_SADDR_RTN]], %bb.2 + ; GFX90A-NEXT: SI_END_CF [[SI_IF1]], implicit-def dead $exec, implicit-def dead $scc, implicit $exec + ; GFX90A-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32 = V_READFIRSTLANE_B32 [[PHI1]], implicit $exec + ; GFX90A-NEXT: early-clobber %45:vgpr_32 = STRICT_WWM [[V_MOV_B32_dpp6]], implicit $exec + ; GFX90A-NEXT: [[V_ADD_F32_e64_6:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, killed [[V_READFIRSTLANE_B32_]], 0, killed %45, 0, 0, implicit $mode, implicit $exec + ; GFX90A-NEXT: S_BRANCH %bb.3 + ; GFX90A-NEXT: {{ $}} + ; GFX90A-NEXT: bb.5 (%ir-block.46): + ; GFX90A-NEXT: $vgpr0 = COPY [[PHI]] + ; GFX90A-NEXT: SI_RETURN_TO_EPILOG $vgpr0 + ; + ; GFX940-LABEL: name: global_atomic_fadd_f32_saddr_rtn_atomicrmw + ; GFX940: bb.0 (%ir-block.0): + ; GFX940-NEXT: successors: %bb.1(0x40000000), %bb.3(0x40000000) + ; GFX940-NEXT: liveins: $sgpr0, $sgpr1, $vgpr0 + ; GFX940-NEXT: {{ $}} + ; GFX940-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX940-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1 + ; GFX940-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr0 + ; GFX940-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX940-NEXT: [[COPY3:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE]] + ; GFX940-NEXT: [[SI_PS_LIVE:%[0-9]+]]:sreg_64 = SI_PS_LIVE + ; GFX940-NEXT: [[DEF:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF + ; GFX940-NEXT: [[SI_IF:%[0-9]+]]:sreg_64 = SI_IF killed [[SI_PS_LIVE]], %bb.3, implicit-def dead $exec, implicit-def dead $scc, implicit $exec + ; GFX940-NEXT: S_BRANCH %bb.1 + ; GFX940-NEXT: {{ $}} + ; GFX940-NEXT: bb.1 (%ir-block.5): + ; GFX940-NEXT: successors: %bb.2(0x40000000), %bb.4(0x40000000) + ; GFX940-NEXT: {{ $}} + ; GFX940-NEXT: [[COPY4:%[0-9]+]]:sreg_64 = COPY $exec + ; GFX940-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY [[COPY4]].sub1 + ; GFX940-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY [[COPY4]].sub0 + ; GFX940-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX940-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX940-NEXT: [[V_MBCNT_LO_U32_B32_e64_:%[0-9]+]]:vgpr_32 = V_MBCNT_LO_U32_B32_e64 killed [[COPY6]], [[COPY7]], implicit $exec + ; GFX940-NEXT: [[V_MBCNT_HI_U32_B32_e64_:%[0-9]+]]:vgpr_32 = V_MBCNT_HI_U32_B32_e64 killed [[COPY5]], killed [[V_MBCNT_LO_U32_B32_e64_]], implicit $exec + ; GFX940-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 -2147483648 + ; GFX940-NEXT: [[V_SET_INACTIVE_B32_:%[0-9]+]]:vgpr_32 = V_SET_INACTIVE_B32 [[COPY]], killed [[S_MOV_B32_1]], implicit-def dead $scc, implicit $exec + ; GFX940-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 -2147483648, implicit $exec + ; GFX940-NEXT: [[V_MOV_B32_dpp:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_SET_INACTIVE_B32_]], 273, 15, 15, 0, implicit $exec + ; GFX940-NEXT: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_SET_INACTIVE_B32_]], 0, killed [[V_MOV_B32_dpp]], 0, 0, implicit $mode, implicit $exec + ; GFX940-NEXT: [[V_MOV_B32_dpp1:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_ADD_F32_e64_]], 274, 15, 15, 0, implicit $exec + ; GFX940-NEXT: [[V_ADD_F32_e64_1:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_]], 0, killed [[V_MOV_B32_dpp1]], 0, 0, implicit $mode, 
implicit $exec + ; GFX940-NEXT: [[V_MOV_B32_dpp2:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_ADD_F32_e64_1]], 276, 15, 15, 0, implicit $exec + ; GFX940-NEXT: [[V_ADD_F32_e64_2:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_1]], 0, killed [[V_MOV_B32_dpp2]], 0, 0, implicit $mode, implicit $exec + ; GFX940-NEXT: [[V_MOV_B32_dpp3:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_ADD_F32_e64_2]], 280, 15, 15, 0, implicit $exec + ; GFX940-NEXT: [[V_ADD_F32_e64_3:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_2]], 0, killed [[V_MOV_B32_dpp3]], 0, 0, implicit $mode, implicit $exec + ; GFX940-NEXT: [[V_MOV_B32_dpp4:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_ADD_F32_e64_3]], 322, 10, 15, 0, implicit $exec + ; GFX940-NEXT: [[V_ADD_F32_e64_4:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_3]], 0, killed [[V_MOV_B32_dpp4]], 0, 0, implicit $mode, implicit $exec + ; GFX940-NEXT: [[V_MOV_B32_dpp5:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_ADD_F32_e64_4]], 323, 12, 15, 0, implicit $exec + ; GFX940-NEXT: [[V_ADD_F32_e64_5:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_4]], 0, killed [[V_MOV_B32_dpp5]], 0, 0, implicit $mode, implicit $exec + ; GFX940-NEXT: [[V_MOV_B32_dpp6:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_ADD_F32_e64_5]], 312, 15, 15, 0, implicit $exec + ; GFX940-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 63 + ; GFX940-NEXT: [[V_READLANE_B32_:%[0-9]+]]:sreg_32 = V_READLANE_B32 [[V_ADD_F32_e64_5]], killed [[S_MOV_B32_2]] + ; GFX940-NEXT: early-clobber %2:sgpr_32 = STRICT_WWM killed [[V_READLANE_B32_]], implicit $exec + ; GFX940-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_64 = V_CMP_EQ_U32_e64 killed [[V_MBCNT_HI_U32_B32_e64_]], [[S_MOV_B32_]], implicit $exec + ; GFX940-NEXT: [[DEF1:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF + ; GFX940-NEXT: [[SI_IF1:%[0-9]+]]:sreg_64 = SI_IF killed [[V_CMP_EQ_U32_e64_]], %bb.4, implicit-def dead $exec, implicit-def dead $scc, implicit $exec + ; GFX940-NEXT: S_BRANCH %bb.2 + ; GFX940-NEXT: {{ $}} + ; GFX940-NEXT: bb.2 (%ir-block.36): + ; GFX940-NEXT: successors: %bb.4(0x80000000) + ; GFX940-NEXT: {{ $}} + ; GFX940-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + ; GFX940-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY %2 + ; GFX940-NEXT: [[GLOBAL_ATOMIC_ADD_F32_SADDR_RTN:%[0-9]+]]:vgpr_32 = GLOBAL_ATOMIC_ADD_F32_SADDR_RTN killed [[V_MOV_B32_e32_1]], [[COPY8]], [[COPY3]], 0, 1, implicit $exec :: (load store syncscope("wavefront") monotonic (s32) on %ir.ptr, addrspace 1) + ; GFX940-NEXT: S_BRANCH %bb.4 + ; GFX940-NEXT: {{ $}} + ; GFX940-NEXT: bb.3.Flow: + ; GFX940-NEXT: successors: %bb.5(0x80000000) + ; GFX940-NEXT: {{ $}} + ; GFX940-NEXT: [[PHI:%[0-9]+]]:vgpr_32 = PHI [[DEF]], %bb.0, %7, %bb.4 + ; GFX940-NEXT: SI_END_CF [[SI_IF]], implicit-def dead $exec, implicit-def dead $scc, implicit $exec + ; GFX940-NEXT: S_BRANCH %bb.5 + ; GFX940-NEXT: {{ $}} + ; GFX940-NEXT: bb.4 (%ir-block.39): + ; GFX940-NEXT: successors: %bb.3(0x80000000) + ; GFX940-NEXT: {{ $}} + ; GFX940-NEXT: [[PHI1:%[0-9]+]]:vgpr_32 = PHI [[DEF1]], %bb.1, [[GLOBAL_ATOMIC_ADD_F32_SADDR_RTN]], %bb.2 + ; GFX940-NEXT: SI_END_CF [[SI_IF1]], implicit-def dead $exec, implicit-def dead $scc, implicit $exec + ; GFX940-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32 = V_READFIRSTLANE_B32 [[PHI1]], implicit $exec + ; GFX940-NEXT: early-clobber %44:vgpr_32 = STRICT_WWM [[V_MOV_B32_dpp6]], implicit $exec + ; GFX940-NEXT: [[V_ADD_F32_e64_6:%[0-9]+]]:vgpr_32 = nofpexcept 
V_ADD_F32_e64 0, killed [[V_READFIRSTLANE_B32_]], 0, killed %44, 0, 0, implicit $mode, implicit $exec + ; GFX940-NEXT: S_BRANCH %bb.3 + ; GFX940-NEXT: {{ $}} + ; GFX940-NEXT: bb.5 (%ir-block.46): + ; GFX940-NEXT: $vgpr0 = COPY [[PHI]] + ; GFX940-NEXT: SI_RETURN_TO_EPILOG $vgpr0 + ; ; GFX11-LABEL: name: global_atomic_fadd_f32_saddr_rtn_atomicrmw ; GFX11: bb.0 (%ir-block.0): ; GFX11-NEXT: successors: %bb.1(0x40000000), %bb.3(0x40000000) diff --git a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd-wrong-subtarget.ll b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd-wrong-subtarget.ll index 9fc0b5c57cc3a..32c2078f08fc0 100644 --- a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd-wrong-subtarget.ll +++ b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd-wrong-subtarget.ll @@ -4,42 +4,55 @@ define amdgpu_kernel void @global_atomic_fadd_ret_f32_wrong_subtarget(ptr addrspace(1) %ptr) #1 { ; GCN-LABEL: global_atomic_fadd_ret_f32_wrong_subtarget: ; GCN: ; %bb.0: -; GCN-NEXT: s_mov_b64 s[4:5], exec -; GCN-NEXT: v_mbcnt_lo_u32_b32 v0, s4, 0 -; GCN-NEXT: v_mbcnt_hi_u32_b32 v0, s5, v0 -; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 -; GCN-NEXT: ; implicit-def: $vgpr1 +; GCN-NEXT: s_mov_b64 s[2:3], exec +; GCN-NEXT: v_bfrev_b32_e32 v1, 1 +; GCN-NEXT: v_mov_b32_e32 v2, 4.0 +; GCN-NEXT: ; implicit-def: $vgpr0 +; GCN-NEXT: .LBB0_1: ; %ComputeLoop +; GCN-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN-NEXT: s_ff1_i32_b64 s6, s[2:3] +; GCN-NEXT: s_lshl_b64 s[4:5], 1, s6 +; GCN-NEXT: v_readfirstlane_b32 s7, v1 +; GCN-NEXT: v_readlane_b32 s8, v2, s6 +; GCN-NEXT: s_mov_b32 m0, s6 +; GCN-NEXT: s_andn2_b64 s[2:3], s[2:3], s[4:5] +; GCN-NEXT: v_writelane_b32 v0, s7, m0 +; GCN-NEXT: s_cmp_lg_u64 s[2:3], 0 +; GCN-NEXT: v_add_f32_e32 v1, s8, v1 +; GCN-NEXT: s_cbranch_scc1 .LBB0_1 +; GCN-NEXT: ; %bb.2: ; %ComputeEnd +; GCN-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0 +; GCN-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2 +; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2 +; GCN-NEXT: ; implicit-def: $vgpr2 ; GCN-NEXT: s_and_saveexec_b64 s[2:3], vcc -; GCN-NEXT: s_cbranch_execz .LBB0_4 -; GCN-NEXT: ; %bb.1: +; GCN-NEXT: s_xor_b64 s[2:3], exec, s[2:3] +; GCN-NEXT: s_cbranch_execz .LBB0_6 +; GCN-NEXT: ; %bb.3: ; GCN-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0 -; GCN-NEXT: s_bcnt1_i32_b64 s7, s[4:5] -; GCN-NEXT: v_cvt_f32_ubyte0_e32 v1, s7 ; GCN-NEXT: s_mov_b64 s[4:5], 0 -; GCN-NEXT: v_mul_f32_e32 v2, 4.0, v1 +; GCN-NEXT: v_mov_b32_e32 v3, 0 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: s_load_dword s6, s[0:1], 0x0 -; GCN-NEXT: v_mov_b32_e32 v3, 0 ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: v_mov_b32_e32 v1, s6 -; GCN-NEXT: .LBB0_2: ; %atomicrmw.start +; GCN-NEXT: v_mov_b32_e32 v2, s6 +; GCN-NEXT: .LBB0_4: ; %atomicrmw.start ; GCN-NEXT: ; =>This Inner Loop Header: Depth=1 -; GCN-NEXT: v_mov_b32_e32 v5, v1 -; GCN-NEXT: v_add_f32_e32 v4, v5, v2 -; GCN-NEXT: global_atomic_cmpswap v1, v3, v[4:5], s[0:1] glc +; GCN-NEXT: v_mov_b32_e32 v5, v2 +; GCN-NEXT: v_add_f32_e32 v4, v5, v1 +; GCN-NEXT: global_atomic_cmpswap v2, v3, v[4:5], s[0:1] glc ; GCN-NEXT: s_waitcnt vmcnt(0) ; GCN-NEXT: buffer_wbinvl1 -; GCN-NEXT: v_cmp_eq_u32_e32 vcc, v1, v5 +; GCN-NEXT: v_cmp_eq_u32_e32 vcc, v2, v5 ; GCN-NEXT: s_or_b64 s[4:5], vcc, s[4:5] ; GCN-NEXT: s_andn2_b64 exec, exec, s[4:5] -; GCN-NEXT: s_cbranch_execnz .LBB0_2 -; GCN-NEXT: ; %bb.3: ; %Flow +; GCN-NEXT: s_cbranch_execnz .LBB0_4 +; GCN-NEXT: ; %bb.5: ; %Flow ; GCN-NEXT: s_or_b64 exec, exec, s[4:5] -; GCN-NEXT: .LBB0_4: ; %Flow2 +; GCN-NEXT: .LBB0_6: ; %Flow4 ; GCN-NEXT: s_or_b64 exec, exec, s[2:3] -; GCN-NEXT: 
v_readfirstlane_b32 s0, v1 -; GCN-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 -; GCN-NEXT: v_mad_f32 v0, v0, 4.0, s0 +; GCN-NEXT: v_readfirstlane_b32 s0, v2 +; GCN-NEXT: v_add_f32_e32 v0, s0, v0 ; GCN-NEXT: global_store_dword v[0:1], v0, off ; GCN-NEXT: s_endpgm %result = atomicrmw fadd ptr addrspace(1) %ptr, float 4.0 syncscope("agent") seq_cst diff --git a/llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll b/llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll index 722c069f90a8c..f1c228a923880 100644 --- a/llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll +++ b/llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll @@ -7,37 +7,101 @@ ; strategies are valid for only divergent values. This optimization is valid for divergent addresses. Test also covers different scopes. define amdgpu_ps float @global_atomic_fadd_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) inreg %ptr, float inreg %val) #0 { -; IR-LABEL: @global_atomic_fadd_uni_address_uni_value_agent_scope_unsafe( -; IR-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() -; IR-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP24:%.*]] -; IR: 2: -; IR-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) -; IR-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32 -; IR-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32 -; IR-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32 -; IR-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) -; IR-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) -; IR-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) -; IR-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32 -; IR-NEXT: [[TMP11:%.*]] = uitofp i32 [[TMP10]] to float -; IR-NEXT: [[TMP12:%.*]] = fmul float [[VAL:%.*]], [[TMP11]] -; IR-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0 -; IR-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]] -; IR: 14: -; IR-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP12]] syncscope("agent") monotonic, align 4 -; IR-NEXT: br label [[TMP16]] -; IR: 16: -; IR-NEXT: [[TMP17:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ] -; IR-NEXT: [[TMP18:%.*]] = bitcast float [[TMP17]] to i32 -; IR-NEXT: [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP18]]) -; IR-NEXT: [[TMP20:%.*]] = bitcast i32 [[TMP19]] to float -; IR-NEXT: [[TMP21:%.*]] = uitofp i32 [[TMP8]] to float -; IR-NEXT: [[TMP22:%.*]] = fmul float [[VAL]], [[TMP21]] -; IR-NEXT: [[TMP23:%.*]] = fadd float [[TMP20]], [[TMP22]] -; IR-NEXT: br label [[TMP24]] -; IR: 24: -; IR-NEXT: [[TMP25:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP23]], [[TMP16]] ] -; IR-NEXT: ret float [[TMP25]] +; IR-ITERATIVE-LABEL: @global_atomic_fadd_uni_address_uni_value_agent_scope_unsafe( +; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() +; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP18:%.*]] +; IR-ITERATIVE: 2: +; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) +; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32 +; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32 +; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32 +; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) +; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) +; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) +; IR-ITERATIVE-NEXT: br label [[COMPUTELOOP:%.*]] +; IR-ITERATIVE: 10: +; 
IR-ITERATIVE-NEXT: [[TMP11:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP29:%.*]] syncscope("agent") monotonic, align 4 +; IR-ITERATIVE-NEXT: br label [[TMP12:%.*]] +; IR-ITERATIVE: 12: +; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = phi float [ poison, [[COMPUTEEND:%.*]] ], [ [[TMP11]], [[TMP10:%.*]] ] +; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = bitcast float [[TMP13]] to i32 +; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP14]]) +; IR-ITERATIVE-NEXT: [[TMP16:%.*]] = bitcast i32 [[TMP15]] to float +; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = fadd float [[TMP16]], [[TMP28:%.*]] +; IR-ITERATIVE-NEXT: br label [[TMP18]] +; IR-ITERATIVE: 18: +; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP17]], [[TMP12]] ] +; IR-ITERATIVE-NEXT: ret float [[TMP19]] +; IR-ITERATIVE: ComputeLoop: +; IR-ITERATIVE-NEXT: [[ACCUMULATOR:%.*]] = phi float [ -0.000000e+00, [[TMP2]] ], [ [[TMP29]], [[COMPUTELOOP]] ] +; IR-ITERATIVE-NEXT: [[OLDVALUEPHI:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP28]], [[COMPUTELOOP]] ] +; IR-ITERATIVE-NEXT: [[ACTIVEBITS:%.*]] = phi i64 [ [[TMP9]], [[TMP2]] ], [ [[TMP32:%.*]], [[COMPUTELOOP]] ] +; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true) +; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32 +; IR-ITERATIVE-NEXT: [[TMP22:%.*]] = bitcast float [[VAL:%.*]] to i32 +; IR-ITERATIVE-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP22]], i32 [[TMP21]]) +; IR-ITERATIVE-NEXT: [[TMP24:%.*]] = bitcast i32 [[TMP23]] to float +; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = bitcast float [[ACCUMULATOR]] to i32 +; IR-ITERATIVE-NEXT: [[TMP26:%.*]] = bitcast float [[OLDVALUEPHI]] to i32 +; IR-ITERATIVE-NEXT: [[TMP27:%.*]] = call i32 @llvm.amdgcn.writelane.i32(i32 [[TMP25]], i32 [[TMP21]], i32 [[TMP26]]) +; IR-ITERATIVE-NEXT: [[TMP28]] = bitcast i32 [[TMP27]] to float +; IR-ITERATIVE-NEXT: [[TMP29]] = fadd float [[ACCUMULATOR]], [[TMP24]] +; IR-ITERATIVE-NEXT: [[TMP30:%.*]] = shl i64 1, [[TMP20]] +; IR-ITERATIVE-NEXT: [[TMP31:%.*]] = xor i64 [[TMP30]], -1 +; IR-ITERATIVE-NEXT: [[TMP32]] = and i64 [[ACTIVEBITS]], [[TMP31]] +; IR-ITERATIVE-NEXT: [[TMP33:%.*]] = icmp eq i64 [[TMP32]], 0 +; IR-ITERATIVE-NEXT: br i1 [[TMP33]], label [[COMPUTEEND]], label [[COMPUTELOOP]] +; IR-ITERATIVE: ComputeEnd: +; IR-ITERATIVE-NEXT: [[TMP34:%.*]] = icmp eq i32 [[TMP8]], 0 +; IR-ITERATIVE-NEXT: br i1 [[TMP34]], label [[TMP10]], label [[TMP12]] +; +; IR-DPP-LABEL: @global_atomic_fadd_uni_address_uni_value_agent_scope_unsafe( +; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() +; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP40:%.*]] +; IR-DPP: 2: +; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) +; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32 +; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32 +; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32 +; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) +; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) +; IR-DPP-NEXT: [[TMP9:%.*]] = bitcast float [[VAL:%.*]] to i32 +; IR-DPP-NEXT: [[TMP10:%.*]] = call i32 @llvm.amdgcn.set.inactive.i32(i32 [[TMP9]], i32 -2147483648) +; IR-DPP-NEXT: [[TMP11:%.*]] = bitcast i32 [[TMP10]] to float +; IR-DPP-NEXT: [[TMP12:%.*]] = bitcast i32 [[TMP9]] to float +; IR-DPP-NEXT: [[TMP13:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP11]], i32 273, i32 15, i32 15, i1 
false) +; IR-DPP-NEXT: [[TMP14:%.*]] = fadd float [[TMP11]], [[TMP13]] +; IR-DPP-NEXT: [[TMP15:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP14]], i32 274, i32 15, i32 15, i1 false) +; IR-DPP-NEXT: [[TMP16:%.*]] = fadd float [[TMP14]], [[TMP15]] +; IR-DPP-NEXT: [[TMP17:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP16]], i32 276, i32 15, i32 15, i1 false) +; IR-DPP-NEXT: [[TMP18:%.*]] = fadd float [[TMP16]], [[TMP17]] +; IR-DPP-NEXT: [[TMP19:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP18]], i32 280, i32 15, i32 15, i1 false) +; IR-DPP-NEXT: [[TMP20:%.*]] = fadd float [[TMP18]], [[TMP19]] +; IR-DPP-NEXT: [[TMP21:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP20]], i32 322, i32 10, i32 15, i1 false) +; IR-DPP-NEXT: [[TMP22:%.*]] = fadd float [[TMP20]], [[TMP21]] +; IR-DPP-NEXT: [[TMP23:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP22]], i32 323, i32 12, i32 15, i1 false) +; IR-DPP-NEXT: [[TMP24:%.*]] = fadd float [[TMP22]], [[TMP23]] +; IR-DPP-NEXT: [[TMP25:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP24]], i32 312, i32 15, i32 15, i1 false) +; IR-DPP-NEXT: [[TMP26:%.*]] = bitcast float [[TMP24]] to i32 +; IR-DPP-NEXT: [[TMP27:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP26]], i32 63) +; IR-DPP-NEXT: [[TMP28:%.*]] = bitcast i32 [[TMP27]] to float +; IR-DPP-NEXT: [[TMP29:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP28]]) +; IR-DPP-NEXT: [[TMP30:%.*]] = icmp eq i32 [[TMP8]], 0 +; IR-DPP-NEXT: br i1 [[TMP30]], label [[TMP31:%.*]], label [[TMP33:%.*]] +; IR-DPP: 31: +; IR-DPP-NEXT: [[TMP32:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP29]] syncscope("agent") monotonic, align 4 +; IR-DPP-NEXT: br label [[TMP33]] +; IR-DPP: 33: +; IR-DPP-NEXT: [[TMP34:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP32]], [[TMP31]] ] +; IR-DPP-NEXT: [[TMP35:%.*]] = bitcast float [[TMP34]] to i32 +; IR-DPP-NEXT: [[TMP36:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP35]]) +; IR-DPP-NEXT: [[TMP37:%.*]] = bitcast i32 [[TMP36]] to float +; IR-DPP-NEXT: [[TMP38:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP25]]) +; IR-DPP-NEXT: [[TMP39:%.*]] = fadd float [[TMP37]], [[TMP38]] +; IR-DPP-NEXT: br label [[TMP40]] +; IR-DPP: 40: +; IR-DPP-NEXT: [[TMP41:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP39]], [[TMP33]] ] +; IR-DPP-NEXT: ret float [[TMP41]] ; %result = atomicrmw fadd ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic, align 4 ret float %result @@ -147,7 +211,7 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_div_value_scope_agent_sco define amdgpu_ps float @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, float inreg %val) #1 { ; IR-ITERATIVE-LABEL: @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp( ; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7:[0-9]+]] -; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP24:%.*]] +; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP18:%.*]] ; IR-ITERATIVE: 2: ; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]] ; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32 @@ -155,31 +219,47 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_uni_value_one_as_scope_un ; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32 ; 
IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]] ; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]] -; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]] -; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32 -; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]] -; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL:%.*]], float [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]] -; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0 -; IR-ITERATIVE-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]] -; IR-ITERATIVE: 14: -; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP12]] syncscope("one-as") monotonic, align 4 -; IR-ITERATIVE-NEXT: br label [[TMP16]] -; IR-ITERATIVE: 16: -; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ] -; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = bitcast float [[TMP17]] to i32 -; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP18]]) #[[ATTR7]] -; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = bitcast i32 [[TMP19]] to float -; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]] -; IR-ITERATIVE-NEXT: [[TMP22:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL]], float [[TMP21]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]] -; IR-ITERATIVE-NEXT: [[TMP23:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP20]], float [[TMP22]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]] -; IR-ITERATIVE-NEXT: br label [[TMP24]] -; IR-ITERATIVE: 24: -; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP23]], [[TMP16]] ] -; IR-ITERATIVE-NEXT: ret float [[TMP25]] +; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]] +; IR-ITERATIVE-NEXT: br label [[COMPUTELOOP:%.*]] +; IR-ITERATIVE: 10: +; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP29:%.*]] syncscope("one-as") monotonic, align 4 +; IR-ITERATIVE-NEXT: br label [[TMP12:%.*]] +; IR-ITERATIVE: 12: +; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = phi float [ poison, [[COMPUTEEND:%.*]] ], [ [[TMP11]], [[TMP10:%.*]] ] +; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = bitcast float [[TMP13]] to i32 +; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP14]]) #[[ATTR7]] +; IR-ITERATIVE-NEXT: [[TMP16:%.*]] = bitcast i32 [[TMP15]] to float +; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP16]], float [[TMP28:%.*]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]] +; IR-ITERATIVE-NEXT: br label [[TMP18]] +; IR-ITERATIVE: 18: +; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP17]], [[TMP12]] ] +; IR-ITERATIVE-NEXT: ret float [[TMP19]] +; IR-ITERATIVE: ComputeLoop: +; IR-ITERATIVE-NEXT: [[ACCUMULATOR:%.*]] = phi float [ -0.000000e+00, [[TMP2]] ], [ [[TMP29]], [[COMPUTELOOP]] ] +; IR-ITERATIVE-NEXT: [[OLDVALUEPHI:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP28]], [[COMPUTELOOP]] ] +; 
IR-ITERATIVE-NEXT: [[ACTIVEBITS:%.*]] = phi i64 [ [[TMP9]], [[TMP2]] ], [ [[TMP32:%.*]], [[COMPUTELOOP]] ] +; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true) #[[ATTR7]] +; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32 +; IR-ITERATIVE-NEXT: [[TMP22:%.*]] = bitcast float [[VAL:%.*]] to i32 +; IR-ITERATIVE-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP22]], i32 [[TMP21]]) #[[ATTR7]] +; IR-ITERATIVE-NEXT: [[TMP24:%.*]] = bitcast i32 [[TMP23]] to float +; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = bitcast float [[ACCUMULATOR]] to i32 +; IR-ITERATIVE-NEXT: [[TMP26:%.*]] = bitcast float [[OLDVALUEPHI]] to i32 +; IR-ITERATIVE-NEXT: [[TMP27:%.*]] = call i32 @llvm.amdgcn.writelane.i32(i32 [[TMP25]], i32 [[TMP21]], i32 [[TMP26]]) #[[ATTR7]] +; IR-ITERATIVE-NEXT: [[TMP28]] = bitcast i32 [[TMP27]] to float +; IR-ITERATIVE-NEXT: [[TMP29]] = call float @llvm.experimental.constrained.fadd.f32(float [[ACCUMULATOR]], float [[TMP24]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]] +; IR-ITERATIVE-NEXT: [[TMP30:%.*]] = shl i64 1, [[TMP20]] +; IR-ITERATIVE-NEXT: [[TMP31:%.*]] = xor i64 [[TMP30]], -1 +; IR-ITERATIVE-NEXT: [[TMP32]] = and i64 [[ACTIVEBITS]], [[TMP31]] +; IR-ITERATIVE-NEXT: [[TMP33:%.*]] = icmp eq i64 [[TMP32]], 0 +; IR-ITERATIVE-NEXT: br i1 [[TMP33]], label [[COMPUTEEND]], label [[COMPUTELOOP]] +; IR-ITERATIVE: ComputeEnd: +; IR-ITERATIVE-NEXT: [[TMP34:%.*]] = icmp eq i32 [[TMP8]], 0 +; IR-ITERATIVE-NEXT: br i1 [[TMP34]], label [[TMP10]], label [[TMP12]] ; ; IR-DPP-LABEL: @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp( ; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8:[0-9]+]] -; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP24:%.*]] +; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP40:%.*]] ; IR-DPP: 2: ; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]] ; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32 @@ -187,27 +267,43 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_uni_value_one_as_scope_un ; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32 ; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]] ; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]] -; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]] -; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32 -; IR-DPP-NEXT: [[TMP11:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]] -; IR-DPP-NEXT: [[TMP12:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL:%.*]], float [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]] -; IR-DPP-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0 -; IR-DPP-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]] -; IR-DPP: 14: -; IR-DPP-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP12]] syncscope("one-as") monotonic, align 4 -; IR-DPP-NEXT: br label [[TMP16]] -; IR-DPP: 16: -; IR-DPP-NEXT: [[TMP17:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ] -; IR-DPP-NEXT: [[TMP18:%.*]] = bitcast float [[TMP17]] to i32 -; IR-DPP-NEXT: [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP18]]) #[[ATTR8]] -; IR-DPP-NEXT: [[TMP20:%.*]] = bitcast i32 [[TMP19]] to float -; IR-DPP-NEXT: [[TMP21:%.*]] = call 
float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP22:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL]], float [[TMP21]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP23:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP20]], float [[TMP22]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: br label [[TMP24]]
-; IR-DPP: 24:
-; IR-DPP-NEXT: [[TMP25:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP23]], [[TMP16]] ]
-; IR-DPP-NEXT: ret float [[TMP25]]
+; IR-DPP-NEXT: [[TMP9:%.*]] = bitcast float [[VAL:%.*]] to i32
+; IR-DPP-NEXT: [[TMP10:%.*]] = call i32 @llvm.amdgcn.set.inactive.i32(i32 [[TMP9]], i32 -2147483648) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP11:%.*]] = bitcast i32 [[TMP10]] to float
+; IR-DPP-NEXT: [[TMP12:%.*]] = bitcast i32 [[TMP9]] to float
+; IR-DPP-NEXT: [[TMP13:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP11]], i32 273, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP14:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP11]], float [[TMP13]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP15:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP14]], i32 274, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP16:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP14]], float [[TMP15]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP17:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP16]], i32 276, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP18:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP16]], float [[TMP17]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP19:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP18]], i32 280, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP20:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP18]], float [[TMP19]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP21:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP20]], i32 322, i32 10, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP22:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP20]], float [[TMP21]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP23:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP22]], i32 323, i32 12, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP24:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP22]], float [[TMP23]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP25:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP24]], i32 312, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP26:%.*]] = bitcast float [[TMP24]] to i32
+; IR-DPP-NEXT: [[TMP27:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP26]], i32 63) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP28:%.*]] = bitcast i32 [[TMP27]] to float
+; IR-DPP-NEXT: [[TMP29:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP28]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP30:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-DPP-NEXT: br i1 [[TMP30]], label [[TMP31:%.*]], label [[TMP33:%.*]]
+; IR-DPP: 31:
+; IR-DPP-NEXT: [[TMP32:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP29]] syncscope("one-as") monotonic, align 4
+; IR-DPP-NEXT: br label [[TMP33]]
+; IR-DPP: 33:
+; IR-DPP-NEXT: [[TMP34:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP32]], [[TMP31]] ]
+; IR-DPP-NEXT: [[TMP35:%.*]] = bitcast float [[TMP34]] to i32
+; IR-DPP-NEXT: [[TMP36:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP35]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP37:%.*]] = bitcast i32 [[TMP36]] to float
+; IR-DPP-NEXT: [[TMP38:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP25]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP39:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP37]], float [[TMP38]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: br label [[TMP40]]
+; IR-DPP: 40:
+; IR-DPP-NEXT: [[TMP41:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP39]], [[TMP33]] ]
+; IR-DPP-NEXT: ret float [[TMP41]]
;
 %result = atomicrmw fadd ptr addrspace(1) %ptr, float %val syncscope("one-as") monotonic
 ret float %result
@@ -317,7 +413,7 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_div_value_one_as_scope_un
 define amdgpu_ps float @global_atomic_fsub_uni_address_uni_value_agent_scope_strictfp(ptr addrspace(1) inreg %ptr, float inreg %val) #2 {
 ; IR-ITERATIVE-LABEL: @global_atomic_fsub_uni_address_uni_value_agent_scope_strictfp(
 ; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
-; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP24:%.*]]
+; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP18:%.*]]
 ; IR-ITERATIVE: 2:
 ; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
 ; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
@@ -325,31 +421,47 @@ define amdgpu_ps float @global_atomic_fsub_uni_address_uni_value_agent_scope_str
 ; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
 ; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
 ; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
-; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL:%.*]], float [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-ITERATIVE-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
-; IR-ITERATIVE: 14:
-; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP12]] syncscope("agent") monotonic, align 4
-; IR-ITERATIVE-NEXT: br label [[TMP16]]
-; IR-ITERATIVE: 16:
-; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = bitcast float [[TMP17]] to i32
-; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP18]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = bitcast i32 [[TMP19]] to float
-; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP22:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL]], float [[TMP21]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP23:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP20]], float [[TMP22]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: br label [[TMP24]]
-; IR-ITERATIVE: 24:
-; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP23]], [[TMP16]] ]
-; IR-ITERATIVE-NEXT: ret float [[TMP25]]
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br label [[COMPUTELOOP:%.*]]
+; IR-ITERATIVE: 10:
+; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP29:%.*]] syncscope("agent") monotonic, align 4
+; IR-ITERATIVE-NEXT: br label [[TMP12:%.*]]
+; IR-ITERATIVE: 12:
+; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = phi float [ poison, [[COMPUTEEND:%.*]] ], [ [[TMP11]], [[TMP10:%.*]] ]
+; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = bitcast float [[TMP13]] to i32
+; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP14]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP16:%.*]] = bitcast i32 [[TMP15]] to float
+; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP16]], float [[TMP28:%.*]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br label [[TMP18]]
+; IR-ITERATIVE: 18:
+; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP17]], [[TMP12]] ]
+; IR-ITERATIVE-NEXT: ret float [[TMP19]]
+; IR-ITERATIVE: ComputeLoop:
+; IR-ITERATIVE-NEXT: [[ACCUMULATOR:%.*]] = phi float [ -0.000000e+00, [[TMP2]] ], [ [[TMP29]], [[COMPUTELOOP]] ]
+; IR-ITERATIVE-NEXT: [[OLDVALUEPHI:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP28]], [[COMPUTELOOP]] ]
+; IR-ITERATIVE-NEXT: [[ACTIVEBITS:%.*]] = phi i64 [ [[TMP9]], [[TMP2]] ], [ [[TMP32:%.*]], [[COMPUTELOOP]] ]
+; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
+; IR-ITERATIVE-NEXT: [[TMP22:%.*]] = bitcast float [[VAL:%.*]] to i32
+; IR-ITERATIVE-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP22]], i32 [[TMP21]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP24:%.*]] = bitcast i32 [[TMP23]] to float
+; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = bitcast float [[ACCUMULATOR]] to i32
+; IR-ITERATIVE-NEXT: [[TMP26:%.*]] = bitcast float [[OLDVALUEPHI]] to i32
+; IR-ITERATIVE-NEXT: [[TMP27:%.*]] = call i32 @llvm.amdgcn.writelane.i32(i32 [[TMP25]], i32 [[TMP21]], i32 [[TMP26]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP28]] = bitcast i32 [[TMP27]] to float
+; IR-ITERATIVE-NEXT: [[TMP29]] = call float @llvm.experimental.constrained.fadd.f32(float [[ACCUMULATOR]], float [[TMP24]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP30:%.*]] = shl i64 1, [[TMP20]]
+; IR-ITERATIVE-NEXT: [[TMP31:%.*]] = xor i64 [[TMP30]], -1
+; IR-ITERATIVE-NEXT: [[TMP32]] = and i64 [[ACTIVEBITS]], [[TMP31]]
+; IR-ITERATIVE-NEXT: [[TMP33:%.*]] = icmp eq i64 [[TMP32]], 0
+; IR-ITERATIVE-NEXT: br i1 [[TMP33]], label [[COMPUTEEND]], label [[COMPUTELOOP]]
+; IR-ITERATIVE: ComputeEnd:
+; IR-ITERATIVE-NEXT: [[TMP34:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-ITERATIVE-NEXT: br i1 [[TMP34]], label [[TMP10]], label [[TMP12]]
;
 ; IR-DPP-LABEL: @global_atomic_fsub_uni_address_uni_value_agent_scope_strictfp(
 ; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
-; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP24:%.*]]
+; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP40:%.*]]
 ; IR-DPP: 2:
 ; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
 ; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
@@ -357,27 +469,43 @@ define amdgpu_ps float @global_atomic_fsub_uni_address_uni_value_agent_scope_str
 ; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
 ; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
 ; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
-; IR-DPP-NEXT: [[TMP11:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP12:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL:%.*]], float [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-DPP-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
-; IR-DPP: 14:
-; IR-DPP-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP12]] syncscope("agent") monotonic, align 4
-; IR-DPP-NEXT: br label [[TMP16]]
-; IR-DPP: 16:
-; IR-DPP-NEXT: [[TMP17:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-DPP-NEXT: [[TMP18:%.*]] = bitcast float [[TMP17]] to i32
-; IR-DPP-NEXT: [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP18]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP20:%.*]] = bitcast i32 [[TMP19]] to float
-; IR-DPP-NEXT: [[TMP21:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP22:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL]], float [[TMP21]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP23:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP20]], float [[TMP22]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: br label [[TMP24]]
-; IR-DPP: 24:
-; IR-DPP-NEXT: [[TMP25:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP23]], [[TMP16]] ]
-; IR-DPP-NEXT: ret float [[TMP25]]
+; IR-DPP-NEXT: [[TMP9:%.*]] = bitcast float [[VAL:%.*]] to i32
+; IR-DPP-NEXT: [[TMP10:%.*]] = call i32 @llvm.amdgcn.set.inactive.i32(i32 [[TMP9]], i32 -2147483648) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP11:%.*]] = bitcast i32 [[TMP10]] to float
+; IR-DPP-NEXT: [[TMP12:%.*]] = bitcast i32 [[TMP9]] to float
+; IR-DPP-NEXT: [[TMP13:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP11]], i32 273, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP14:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP11]], float [[TMP13]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP15:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP14]], i32 274, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP16:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP14]], float [[TMP15]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP17:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP16]], i32 276, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP18:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP16]], float [[TMP17]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP19:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP18]], i32 280, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP20:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP18]], float [[TMP19]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP21:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP20]], i32 322, i32 10, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP22:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP20]], float [[TMP21]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP23:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP22]], i32 323, i32 12, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP24:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP22]], float [[TMP23]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP25:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP24]], i32 312, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP26:%.*]] = bitcast float [[TMP24]] to i32
+; IR-DPP-NEXT: [[TMP27:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP26]], i32 63) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP28:%.*]] = bitcast i32 [[TMP27]] to float
+; IR-DPP-NEXT: [[TMP29:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP28]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP30:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-DPP-NEXT: br i1 [[TMP30]], label [[TMP31:%.*]], label [[TMP33:%.*]]
+; IR-DPP: 31:
+; IR-DPP-NEXT: [[TMP32:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP29]] syncscope("agent") monotonic, align 4
+; IR-DPP-NEXT: br label [[TMP33]]
+; IR-DPP: 33:
+; IR-DPP-NEXT: [[TMP34:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP32]], [[TMP31]] ]
+; IR-DPP-NEXT: [[TMP35:%.*]] = bitcast float [[TMP34]] to i32
+; IR-DPP-NEXT: [[TMP36:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP35]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP37:%.*]] = bitcast i32 [[TMP36]] to float
+; IR-DPP-NEXT: [[TMP38:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP25]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP39:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP37]], float [[TMP38]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: br label [[TMP40]]
+; IR-DPP: 40:
+; IR-DPP-NEXT: [[TMP41:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP39]], [[TMP33]] ]
+; IR-DPP-NEXT: ret float [[TMP41]]
;
 %result = atomicrmw fadd ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic
 ret float %result
@@ -783,7 +911,7 @@ define amdgpu_ps float @global_atomic_fmax_uni_address_div_value_agent_scope_uns
 define amdgpu_ps float @global_atomic_fadd_uni_address_uni_value_system_scope_strictfp(ptr addrspace(1) inreg %ptr, float inreg %val) #2 {
 ; IR-ITERATIVE-LABEL: @global_atomic_fadd_uni_address_uni_value_system_scope_strictfp(
 ; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
-; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP24:%.*]]
+; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP18:%.*]]
 ; IR-ITERATIVE: 2:
 ; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
 ; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
@@ -791,31 +919,47 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_uni_value_system_scope_st
 ; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
 ; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
 ; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
-; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL:%.*]], float [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-ITERATIVE-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
-; IR-ITERATIVE: 14:
-; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP12]] monotonic, align 4
-; IR-ITERATIVE-NEXT: br label [[TMP16]]
-; IR-ITERATIVE: 16:
-; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = bitcast float [[TMP17]] to i32
-; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP18]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = bitcast i32 [[TMP19]] to float
-; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP22:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL]], float [[TMP21]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP23:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP20]], float [[TMP22]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: br label [[TMP24]]
-; IR-ITERATIVE: 24:
-; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP23]], [[TMP16]] ]
-; IR-ITERATIVE-NEXT: ret float [[TMP25]]
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br label [[COMPUTELOOP:%.*]]
+; IR-ITERATIVE: 10:
+; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP29:%.*]] monotonic, align 4
+; IR-ITERATIVE-NEXT: br label [[TMP12:%.*]]
+; IR-ITERATIVE: 12:
+; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = phi float [ poison, [[COMPUTEEND:%.*]] ], [ [[TMP11]], [[TMP10:%.*]] ]
+; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = bitcast float [[TMP13]] to i32
+; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP14]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP16:%.*]] = bitcast i32 [[TMP15]] to float
+; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP16]], float [[TMP28:%.*]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br label [[TMP18]]
+; IR-ITERATIVE: 18:
+; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP17]], [[TMP12]] ]
+; IR-ITERATIVE-NEXT: ret float [[TMP19]]
+; IR-ITERATIVE: ComputeLoop:
+; IR-ITERATIVE-NEXT: [[ACCUMULATOR:%.*]] = phi float [ -0.000000e+00, [[TMP2]] ], [ [[TMP29]], [[COMPUTELOOP]] ]
+; IR-ITERATIVE-NEXT: [[OLDVALUEPHI:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP28]], [[COMPUTELOOP]] ]
+; IR-ITERATIVE-NEXT: [[ACTIVEBITS:%.*]] = phi i64 [ [[TMP9]], [[TMP2]] ], [ [[TMP32:%.*]], [[COMPUTELOOP]] ]
+; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
+; IR-ITERATIVE-NEXT: [[TMP22:%.*]] = bitcast float [[VAL:%.*]] to i32
+; IR-ITERATIVE-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP22]], i32 [[TMP21]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP24:%.*]] = bitcast i32 [[TMP23]] to float
+; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = bitcast float [[ACCUMULATOR]] to i32
+; IR-ITERATIVE-NEXT: [[TMP26:%.*]] = bitcast float [[OLDVALUEPHI]] to i32
+; IR-ITERATIVE-NEXT: [[TMP27:%.*]] = call i32 @llvm.amdgcn.writelane.i32(i32 [[TMP25]], i32 [[TMP21]], i32 [[TMP26]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP28]] = bitcast i32 [[TMP27]] to float
+; IR-ITERATIVE-NEXT: [[TMP29]] = call float @llvm.experimental.constrained.fadd.f32(float [[ACCUMULATOR]], float [[TMP24]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP30:%.*]] = shl i64 1, [[TMP20]]
+; IR-ITERATIVE-NEXT: [[TMP31:%.*]] = xor i64 [[TMP30]], -1
+; IR-ITERATIVE-NEXT: [[TMP32]] = and i64 [[ACTIVEBITS]], [[TMP31]]
+; IR-ITERATIVE-NEXT: [[TMP33:%.*]] = icmp eq i64 [[TMP32]], 0
+; IR-ITERATIVE-NEXT: br i1 [[TMP33]], label [[COMPUTEEND]], label [[COMPUTELOOP]]
+; IR-ITERATIVE: ComputeEnd:
+; IR-ITERATIVE-NEXT: [[TMP34:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-ITERATIVE-NEXT: br i1 [[TMP34]], label [[TMP10]], label [[TMP12]]
;
 ; IR-DPP-LABEL: @global_atomic_fadd_uni_address_uni_value_system_scope_strictfp(
 ; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
-; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP24:%.*]]
+; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP40:%.*]]
 ; IR-DPP: 2:
 ; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
 ; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
@@ -823,27 +967,43 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_uni_value_system_scope_st
 ; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
 ; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
 ; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
-; IR-DPP-NEXT: [[TMP11:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP12:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL:%.*]], float [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-DPP-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
-; IR-DPP: 14:
-; IR-DPP-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP12]] monotonic, align 4
-; IR-DPP-NEXT: br label [[TMP16]]
-; IR-DPP: 16:
-; IR-DPP-NEXT: [[TMP17:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-DPP-NEXT: [[TMP18:%.*]] = bitcast float [[TMP17]] to i32
-; IR-DPP-NEXT: [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP18]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP20:%.*]] = bitcast i32 [[TMP19]] to float
-; IR-DPP-NEXT: [[TMP21:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP22:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL]], float [[TMP21]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP23:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP20]], float [[TMP22]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: br label [[TMP24]]
-; IR-DPP: 24:
-; IR-DPP-NEXT: [[TMP25:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP23]], [[TMP16]] ]
-; IR-DPP-NEXT: ret float [[TMP25]]
+; IR-DPP-NEXT: [[TMP9:%.*]] = bitcast float [[VAL:%.*]] to i32
+; IR-DPP-NEXT: [[TMP10:%.*]] = call i32 @llvm.amdgcn.set.inactive.i32(i32 [[TMP9]], i32 -2147483648) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP11:%.*]] = bitcast i32 [[TMP10]] to float
+; IR-DPP-NEXT: [[TMP12:%.*]] = bitcast i32 [[TMP9]] to float
+; IR-DPP-NEXT: [[TMP13:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP11]], i32 273, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP14:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP11]], float [[TMP13]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP15:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP14]], i32 274, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP16:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP14]], float [[TMP15]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP17:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP16]], i32 276, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP18:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP16]], float [[TMP17]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP19:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP18]], i32 280, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP20:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP18]], float [[TMP19]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP21:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP20]], i32 322, i32 10, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP22:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP20]], float [[TMP21]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP23:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP22]], i32 323, i32 12, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP24:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP22]], float [[TMP23]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP25:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP24]], i32 312, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP26:%.*]] = bitcast float [[TMP24]] to i32
+; IR-DPP-NEXT: [[TMP27:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP26]], i32 63) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP28:%.*]] = bitcast i32 [[TMP27]] to float
+; IR-DPP-NEXT: [[TMP29:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP28]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP30:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-DPP-NEXT: br i1 [[TMP30]], label [[TMP31:%.*]], label [[TMP33:%.*]]
+; IR-DPP: 31:
+; IR-DPP-NEXT: [[TMP32:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP29]] monotonic, align 4
+; IR-DPP-NEXT: br label [[TMP33]]
+; IR-DPP: 33:
+; IR-DPP-NEXT: [[TMP34:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP32]], [[TMP31]] ]
+; IR-DPP-NEXT: [[TMP35:%.*]] = bitcast float [[TMP34]] to i32
+; IR-DPP-NEXT: [[TMP36:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP35]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP37:%.*]] = bitcast i32 [[TMP36]] to float
+; IR-DPP-NEXT: [[TMP38:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP25]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP39:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP37]], float [[TMP38]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: br label [[TMP40]]
+; IR-DPP: 40:
+; IR-DPP-NEXT: [[TMP41:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP39]], [[TMP33]] ]
+; IR-DPP-NEXT: ret float [[TMP41]]
;
 %result = atomicrmw fadd ptr addrspace(1) %ptr, float %val monotonic, align 4
 ret float %result
@@ -1060,42 +1220,8 @@ define amdgpu_ps float @global_atomic_fadd_div_address_div_value_system_scope_st
 define amdgpu_ps double @global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) inreg %ptr, double inreg %val) #0 {
 ; IR-LABEL: @global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe(
-; IR-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
-; IR-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP30:%.*]]
-; IR: 2:
-; IR-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
-; IR-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
-; IR-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
-; IR-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
-; IR-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
-; IR-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
-; IR-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
-; IR-NEXT: [[TMP11:%.*]] = uitofp i32 [[TMP10]] to double
-; IR-NEXT: [[TMP12:%.*]] = fmul double [[VAL:%.*]], [[TMP11]]
-; IR-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
-; IR: 14:
-; IR-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("agent") monotonic, align 4
-; IR-NEXT: br label [[TMP16]]
-; IR: 16:
-; IR-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-NEXT: [[TMP18:%.*]] = bitcast double [[TMP17]] to i64
-; IR-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
-; IR-NEXT: [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
-; IR-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
-; IR-NEXT: [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP19]])
-; IR-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP21]])
-; IR-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
-; IR-NEXT: [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
-; IR-NEXT: [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
-; IR-NEXT: [[TMP27:%.*]] = uitofp i32 [[TMP8]] to double
-; IR-NEXT: [[TMP28:%.*]] = fmul double [[VAL]], [[TMP27]]
-; IR-NEXT: [[TMP29:%.*]] = fadd double [[TMP26]], [[TMP28]]
-; IR-NEXT: br label [[TMP30]]
-; IR: 30:
-; IR-NEXT: [[TMP31:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP29]], [[TMP16]] ]
-; IR-NEXT: ret double [[TMP31]]
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret double [[RESULT]]
;
 %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic, align 4
 ret double %result
@@ -1111,81 +1237,9 @@ define amdgpu_ps double @global_atomic_fadd_double_uni_address_div_value_scope_a
 }
 define amdgpu_ps double @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, double inreg %val) #1 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
-; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP30:%.*]]
-; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
-; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
-; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
-; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-ITERATIVE-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
-; IR-ITERATIVE: 14:
-; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("one-as") monotonic, align 8
-; IR-ITERATIVE-NEXT: br label [[TMP16]]
-; IR-ITERATIVE: 16:
-; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = bitcast double [[TMP17]] to i64
-; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
-; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
-; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
-; IR-ITERATIVE-NEXT: [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP19]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP21]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
-; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
-; IR-ITERATIVE-NEXT: [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
-; IR-ITERATIVE-NEXT: [[TMP27:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP28:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL]], double [[TMP27]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP29:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP26]], double [[TMP28]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: br label [[TMP30]]
-; IR-ITERATIVE: 30:
-; IR-ITERATIVE-NEXT: [[TMP31:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP29]], [[TMP16]] ]
-; IR-ITERATIVE-NEXT: ret double [[TMP31]]
-;
-; IR-DPP-LABEL: @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
-; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP30:%.*]]
-; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
-; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
-; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
-; IR-DPP-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-DPP-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
-; IR-DPP: 14:
-; IR-DPP-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("one-as") monotonic, align 8
-; IR-DPP-NEXT: br label [[TMP16]]
-; IR-DPP: 16:
-; IR-DPP-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-DPP-NEXT: [[TMP18:%.*]] = bitcast double [[TMP17]] to i64
-; IR-DPP-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
-; IR-DPP-NEXT: [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
-; IR-DPP-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
-; IR-DPP-NEXT: [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP19]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP21]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
-; IR-DPP-NEXT: [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
-; IR-DPP-NEXT: [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
-; IR-DPP-NEXT: [[TMP27:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP28:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL]], double [[TMP27]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP29:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP26]], double [[TMP28]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: br label [[TMP30]]
-; IR-DPP: 30:
-; IR-DPP-NEXT: [[TMP31:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP29]], [[TMP16]] ]
-; IR-DPP-NEXT: ret double [[TMP31]]
+; IR-LABEL: @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("one-as") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
;
 %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("one-as") monotonic
 ret double %result
@@ -1201,81 +1255,9 @@ define amdgpu_ps double @global_atomic_fadd_double_uni_address_div_value_one_as_
 }
 define amdgpu_ps double @global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp(ptr addrspace(1) inreg %ptr, double inreg %val) #2 {
-; IR-ITERATIVE-LABEL: @global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
-; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP30:%.*]]
-; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
-; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
-; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
-; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-ITERATIVE-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
-; IR-ITERATIVE: 14:
-; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("agent") monotonic, align 8
-; IR-ITERATIVE-NEXT: br label [[TMP16]]
-; IR-ITERATIVE: 16:
-; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = bitcast double [[TMP17]] to i64
-; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
-; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
-; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
-; IR-ITERATIVE-NEXT: [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP19]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP21]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
-; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
-; IR-ITERATIVE-NEXT: [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
-; IR-ITERATIVE-NEXT: [[TMP27:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP28:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL]], double [[TMP27]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP29:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP26]], double [[TMP28]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: br label [[TMP30]]
-; IR-ITERATIVE: 30:
-; IR-ITERATIVE-NEXT: [[TMP31:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP29]], [[TMP16]] ]
-; IR-ITERATIVE-NEXT: ret double [[TMP31]]
-;
-; IR-DPP-LABEL: @global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
-; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP30:%.*]]
-; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
-; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
-; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
-; IR-DPP-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-DPP-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
-; IR-DPP: 14:
-; IR-DPP-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("agent") monotonic, align 8
-; IR-DPP-NEXT: br label [[TMP16]]
-; IR-DPP: 16:
-; IR-DPP-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-DPP-NEXT: [[TMP18:%.*]] = bitcast double [[TMP17]] to i64
-; IR-DPP-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
-; IR-DPP-NEXT: [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
-; IR-DPP-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
-; IR-DPP-NEXT: [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP19]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP21]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
-; IR-DPP-NEXT: [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
-; IR-DPP-NEXT: [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
-; IR-DPP-NEXT: [[TMP27:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP28:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL]], double [[TMP27]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP29:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP26]], double [[TMP28]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: br label [[TMP30]]
-; IR-DPP: 30:
-; IR-DPP-NEXT: [[TMP31:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP29]], [[TMP16]] ]
-; IR-DPP-NEXT: ret double [[TMP31]]
+; IR-LABEL: @global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
;
 %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
 ret double %result
@@ -1421,81 +1403,9 @@ define amdgpu_ps double @global_atomic__fmax_double_uni_address_div_value_agent_
 }
 define amdgpu_ps double @global_atomic_fadd_double_uni_address_uni_value_system_scope_strictfp(ptr addrspace(1) inreg %ptr, double inreg %val) #2 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_double_uni_address_uni_value_system_scope_strictfp(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
-; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP30:%.*]]
-; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
-; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
-; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
-; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-ITERATIVE-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
-; IR-ITERATIVE: 14:
-; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] monotonic, align 4
-; IR-ITERATIVE-NEXT: br label [[TMP16]]
-; IR-ITERATIVE: 16:
-; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = bitcast double [[TMP17]] to i64
-; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
-; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
-; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
-; IR-ITERATIVE-NEXT: [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP19]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP21]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
-; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
-; IR-ITERATIVE-NEXT: [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
-; IR-ITERATIVE-NEXT: [[TMP27:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP28:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL]], double [[TMP27]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP29:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP26]], double [[TMP28]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
-; IR-ITERATIVE-NEXT: br label [[TMP30]]
-; IR-ITERATIVE: 30:
-; IR-ITERATIVE-NEXT: [[TMP31:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP29]], [[TMP16]] ]
-; IR-ITERATIVE-NEXT: ret double [[TMP31]]
-;
-; IR-DPP-LABEL: @global_atomic_fadd_double_uni_address_uni_value_system_scope_strictfp(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
-; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP30:%.*]]
-; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
-; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
-; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
-; IR-DPP-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-DPP-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
-; IR-DPP: 14:
-; IR-DPP-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] monotonic, align 4
-; IR-DPP-NEXT: br label [[TMP16]]
-; IR-DPP: 16:
-; IR-DPP-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-DPP-NEXT: [[TMP18:%.*]] = bitcast double [[TMP17]] to i64
-; IR-DPP-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
-; IR-DPP-NEXT: [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
-; IR-DPP-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
-; IR-DPP-NEXT: [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP19]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP21]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
-; IR-DPP-NEXT: [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
-; IR-DPP-NEXT: [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
-; IR-DPP-NEXT: [[TMP27:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP28:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL]], double [[TMP27]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP29:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP26]], double [[TMP28]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: br label [[TMP30]]
-; IR-DPP: 30:
-; IR-DPP-NEXT: [[TMP31:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP29]], [[TMP16]] ]
-; IR-DPP-NEXT: ret double [[TMP31]]
+; IR-LABEL: @global_atomic_fadd_double_uni_address_uni_value_system_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] monotonic, align 4
+; IR-NEXT: ret double [[RESULT]]
;
 %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val monotonic, align 4
 ret double %result
diff --git a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll
index 99b67a278a027..9744bd42786ea 100644
--- a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll
@@ -6979,36 +6979,50 @@ define void @local_atomic_fadd_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
 define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3) %ptrf, i32 %idx) {
 ; GFX12-LABEL: local_ds_fadd:
 ; GFX12: ; %bb.0:
+; GFX12-NEXT: v_mov_b32_e32 v1, 0x42280000
+; GFX12-NEXT: s_mov_b32 s2, exec_lo
+; GFX12-NEXT: s_brev_b32 s4, 1
+; GFX12-NEXT: ; implicit-def: $vgpr0
+; GFX12-NEXT: .LBB28_1: ; %ComputeLoop
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_ctz_i32_b32 s3, s2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX12-NEXT: v_readlane_b32 s5, v1, s3
+; GFX12-NEXT: s_lshl_b32 s6, 1, s3
+; GFX12-NEXT: v_writelane_b32 v0, s4, s3
+; GFX12-NEXT: s_and_not1_b32 s2, s2, s6
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_cmp_lg_u32 s2, 0
+; GFX12-NEXT: s_add_f32 s4, s4, s5
+; GFX12-NEXT: s_cbranch_scc1 .LBB28_1
+; GFX12-NEXT: ; %bb.2: ; %ComputeEnd
 ; GFX12-NEXT: s_load_b64 s[2:3], s[0:1], 0x8
-; GFX12-NEXT: s_mov_b32 s5, exec_lo
-; GFX12-NEXT: s_mov_b32 s4, exec_lo
-; GFX12-NEXT: v_mbcnt_lo_u32_b32 v0, s5, 0
+; GFX12-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(SALU_CYCLE_1)
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1
 ; GFX12-NEXT: ; implicit-def: $vgpr1
 ; GFX12-NEXT: s_wait_kmcnt 0x0
 ; GFX12-NEXT: s_add_co_i32 s3, s3, 4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_cmpx_eq_u32_e32 0, v0
-; GFX12-NEXT: s_cbranch_execz .LBB28_2
-; GFX12-NEXT: ; %bb.1:
-; GFX12-NEXT: s_bcnt1_i32_b32 s5, s5
+; GFX12-NEXT: s_and_saveexec_b32 s5, vcc_lo
+; GFX12-NEXT: s_xor_b32 s5, exec_lo, s5
+; GFX12-NEXT: s_cbranch_execz .LBB28_4
+; GFX12-NEXT: ; %bb.3:
+; GFX12-NEXT: s_lshl_b32 s6, s3, 3
 ; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX12-NEXT: v_cvt_f32_ubyte0_e32 v1, s5
-; GFX12-NEXT: s_lshl_b32 s5, s3, 3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX12-NEXT: v_dual_mov_b32 v2, s5 :: v_dual_mul_f32 v1, 0x42280000, v1
-; GFX12-NEXT: ds_add_rtn_f32 v1, v2, v1
+; GFX12-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v1, s6
+; GFX12-NEXT: ds_add_rtn_f32 v1, v1, v2
 ; GFX12-NEXT: s_wait_dscnt 0x0
 ; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: .LBB28_2:
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: .LBB28_4:
+; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s5
 ; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
 ; GFX12-NEXT: s_mov_b32 s6, exec_lo
 ; GFX12-NEXT: v_readfirstlane_b32 s5, v1
 ; GFX12-NEXT: v_mbcnt_lo_u32_b32 v2, s6, 0
 ; GFX12-NEXT: s_mov_b32 s4, exec_lo
 ; GFX12-NEXT: v_cmpx_eq_u32_e32 0, v2
-; GFX12-NEXT: s_cbranch_execz .LBB28_4
-; GFX12-NEXT: ; %bb.3:
+; GFX12-NEXT: s_cbranch_execz .LBB28_6
+; GFX12-NEXT: ; %bb.5:
 ; GFX12-NEXT: s_bcnt1_i32_b32 s6, s6
 ; GFX12-NEXT: s_lshl_b32 s3, s3, 4
 ; GFX12-NEXT: v_cvt_f32_ubyte0_e32 v1, s6
@@ -7017,16 +7031,13 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
 ; GFX12-NEXT: ds_add_f32 v2, v1
 ; GFX12-NEXT: s_wait_dscnt 0x0
 ; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: .LBB28_4:
+; GFX12-NEXT: .LBB28_6:
 ; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s4
-; GFX12-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
+; GFX12-NEXT: v_add_f32_e32 v1, s5, v0
 ; GFX12-NEXT: s_mov_b32 s4, exec_lo
 ; GFX12-NEXT: s_brev_b32 s3, 1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
-; GFX12-NEXT: v_add_f32_e32 v1, s5, v0
 ; GFX12-NEXT: ; implicit-def: $vgpr0
-; GFX12-NEXT: .LBB28_5: ; %ComputeLoop
+; GFX12-NEXT: .LBB28_7: ; %ComputeLoop1
 ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-NEXT: s_ctz_i32_b32 s5, s4
 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
@@ -7037,21 +7048,21 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
 ; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX12-NEXT: s_cmp_lg_u32 s4, 0
 ; GFX12-NEXT: s_add_f32 s3, s3, s6
-; GFX12-NEXT: s_cbranch_scc1 .LBB28_5
-; GFX12-NEXT: ; %bb.6: ; %ComputeEnd
+; GFX12-NEXT: s_cbranch_scc1 .LBB28_7
+; GFX12-NEXT: ; %bb.8: ; %ComputeEnd2
 ; GFX12-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
 ; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1
 ; GFX12-NEXT: ; implicit-def: $vgpr1
 ; GFX12-NEXT: s_and_saveexec_b32 s4, vcc_lo
 ; GFX12-NEXT: s_xor_b32 s4, exec_lo, s4
-; GFX12-NEXT: s_cbranch_execz .LBB28_8
-; GFX12-NEXT: ; %bb.7:
+; GFX12-NEXT: s_cbranch_execz .LBB28_10
+; GFX12-NEXT: ; %bb.9:
 ; GFX12-NEXT: v_dual_mov_b32 v1, s2 :: v_dual_mov_b32 v2, s3
 ; GFX12-NEXT: ds_add_rtn_f32 v1, v1, v2
 ; GFX12-NEXT: s_wait_dscnt 0x0
 ; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: .LBB28_8:
+; GFX12-NEXT: .LBB28_10:
 ; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s4
 ; GFX12-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
 ; GFX12-NEXT: v_readfirstlane_b32 s2, v1
@@ -7065,34 +7076,48 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
 ;
 ; GFX940-LABEL: local_ds_fadd:
 ; GFX940: ; %bb.0:
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: v_bfrev_b32_e32 v1, 1
+; GFX940-NEXT: v_mov_b32_e32 v2, 0x42280000
+; GFX940-NEXT: ; implicit-def: $vgpr0
+; GFX940-NEXT: .LBB28_1: ; %ComputeLoop
+; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX940-NEXT: s_ff1_i32_b64 s6, s[2:3]
+; GFX940-NEXT: s_lshl_b64 s[4:5], 1, s6
+; GFX940-NEXT: v_readfirstlane_b32 s7, v1
+; GFX940-NEXT: v_readlane_b32 s8, v2, s6
+; GFX940-NEXT: s_mov_b32 m0, s6
+; GFX940-NEXT: s_andn2_b64 s[2:3], s[2:3], s[4:5]
+; GFX940-NEXT: v_writelane_b32 v0, s7, m0
+; GFX940-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX940-NEXT: v_add_f32_e32 v1, s8, v1
+; GFX940-NEXT: s_cbranch_scc1 .LBB28_1
+; GFX940-NEXT: ; %bb.2: ; %ComputeEnd
 ; GFX940-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x8
-; GFX940-NEXT: s_mov_b64 s[4:5], exec
-; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s4, 0
-; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s5, v0
-; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
+; GFX940-NEXT: ; implicit-def: $vgpr2
 ; GFX940-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX940-NEXT: s_add_i32 s3, s3, 4
-; GFX940-NEXT: ; implicit-def: $vgpr1
-; GFX940-NEXT: s_and_saveexec_b64 s[6:7], vcc
-; GFX940-NEXT: s_cbranch_execz .LBB28_2
-; GFX940-NEXT: ; %bb.1:
-; GFX940-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
-; GFX940-NEXT: s_lshl_b32 s8, s3, 3
-; GFX940-NEXT: v_cvt_f32_ubyte0_e32 v1, s4
-; GFX940-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
-; GFX940-NEXT: v_mov_b32_e32 v2, s8
-; GFX940-NEXT: ds_add_rtn_f32 v1, v2, v1
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GFX940-NEXT: s_cbranch_execz .LBB28_4
+; GFX940-NEXT: ; %bb.3:
+; GFX940-NEXT: s_lshl_b32 s6, s3, 3
+; GFX940-NEXT: v_mov_b32_e32 v2, s6
+; GFX940-NEXT: ds_add_rtn_f32 v2, v2, v1
 ; GFX940-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-NEXT: .LBB28_2:
-; GFX940-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX940-NEXT: .LBB28_4:
+; GFX940-NEXT: s_or_b64 exec, exec, s[4:5]
 ; GFX940-NEXT: s_mov_b64 s[6:7], exec
-; GFX940-NEXT: v_readfirstlane_b32 s8, v1
 ; GFX940-NEXT: v_mbcnt_lo_u32_b32 v1, s6, 0
 ; GFX940-NEXT: v_mbcnt_hi_u32_b32 v1, s7, v1
+; GFX940-NEXT: v_readfirstlane_b32 s8, v2
 ; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
 ; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX940-NEXT: s_cbranch_execz .LBB28_4
-; GFX940-NEXT: ; %bb.3:
+; GFX940-NEXT: s_cbranch_execz .LBB28_6
+; GFX940-NEXT: ; %bb.5:
 ; GFX940-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
 ; GFX940-NEXT: v_cvt_f32_ubyte0_e32 v1, s6
 ; GFX940-NEXT: s_lshl_b32 s3, s3, 4
@@ -7100,15 +7125,13 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
 ; GFX940-NEXT: v_mov_b32_e32 v2, s3
 ; GFX940-NEXT: ds_add_f32 v2, v1
 ; GFX940-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-NEXT: .LBB28_4:
+; GFX940-NEXT: .LBB28_6:
 ; GFX940-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX940-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
-; GFX940-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
 ; GFX940-NEXT: s_mov_b64 s[4:5], exec
 ; GFX940-NEXT: v_add_f32_e32 v2, s8, v0
 ; GFX940-NEXT: v_bfrev_b32_e32 v1, 1
 ; GFX940-NEXT: ; implicit-def: $vgpr0
-; GFX940-NEXT: .LBB28_5: ; %ComputeLoop
+; GFX940-NEXT: .LBB28_7: ; %ComputeLoop1
 ; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX940-NEXT: s_ff1_i32_b64 s3, s[4:5]
 ; GFX940-NEXT: s_lshl_b64 s[6:7], 1, s3
@@ -7119,20 +7142,20 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
 ; GFX940-NEXT: v_writelane_b32 v0, s8, m0
 ; GFX940-NEXT: s_cmp_lg_u64 s[4:5], 0
 ; GFX940-NEXT: v_add_f32_e32 v1, s9, v1
-; GFX940-NEXT: s_cbranch_scc1 .LBB28_5
-; GFX940-NEXT: ; %bb.6: ; %ComputeEnd
+; GFX940-NEXT: s_cbranch_scc1 .LBB28_7
+; GFX940-NEXT: ; %bb.8: ; %ComputeEnd2
 ; GFX940-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
 ; GFX940-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
 ; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
 ; GFX940-NEXT: ; implicit-def: $vgpr2
 ; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
 ; GFX940-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX940-NEXT: s_cbranch_execz .LBB28_8
-; GFX940-NEXT: ; %bb.7:
+; GFX940-NEXT: s_cbranch_execz .LBB28_10
+; GFX940-NEXT: ; %bb.9:
 ; GFX940-NEXT: v_mov_b32_e32 v2, s2
 ; GFX940-NEXT: ds_add_rtn_f32 v2, v2, v1
 ; GFX940-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-NEXT: .LBB28_8:
+; GFX940-NEXT: .LBB28_10:
 ; GFX940-NEXT: s_or_b64 exec, exec, s[4:5]
 ; GFX940-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
 ; GFX940-NEXT: v_readfirstlane_b32 s2, v2
@@ -7145,36 +7168,52 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
 ;
 ; GFX11-LABEL: local_ds_fadd:
 ; GFX11: ; %bb.0:
+; GFX11-NEXT: v_bfrev_b32_e32 v1, 1
+; GFX11-NEXT: v_mov_b32_e32 v2, 0x42280000
+; GFX11-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-NEXT: ; implicit-def: $vgpr0
+; GFX11-NEXT: .LBB28_1: ; %ComputeLoop
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: s_ctz_i32_b32 s3, s2
+; GFX11-NEXT: v_readfirstlane_b32 s4, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: v_readlane_b32 s5, v2, s3
+; GFX11-NEXT: s_lshl_b32 s6, 1, s3
+; GFX11-NEXT: s_and_not1_b32 s2, s2, s6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_writelane_b32 v0, s4, s3
+; GFX11-NEXT: v_add_f32_e32 v1, s5, v1
+; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_cbranch_scc1 .LBB28_1
+; GFX11-NEXT: ; %bb.2: ; %ComputeEnd
 ; GFX11-NEXT: s_load_b64 s[2:3], s[0:1], 0x8
-; GFX11-NEXT: s_mov_b32 s5, exec_lo
-; GFX11-NEXT: s_mov_b32 s4, exec_lo
-; GFX11-NEXT: v_mbcnt_lo_u32_b32 v0, s5, 0
-; GFX11-NEXT: ; implicit-def: $vgpr1
+; GFX11-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2
+; GFX11-NEXT: ; implicit-def: $vgpr2
 ; GFX11-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT: s_add_i32 s3, s3, 4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_cmpx_eq_u32_e32 0, v0
-; GFX11-NEXT: s_cbranch_execz .LBB28_2
-; GFX11-NEXT: ; %bb.1:
-; GFX11-NEXT: s_bcnt1_i32_b32 s5, s5
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_cvt_f32_ubyte0_e32 v1, s5
+; GFX11-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX11-NEXT: s_xor_b32 s4, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execz .LBB28_4
+; GFX11-NEXT: ; %bb.3:
 ; GFX11-NEXT: s_lshl_b32 s5, s3, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_dual_mov_b32 v2, s5 :: v_dual_mul_f32 v1, 0x42280000, v1
-; GFX11-NEXT: ds_add_rtn_f32 v1, v2, v1
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_mov_b32_e32 v2, s5
+; GFX11-NEXT: ds_add_rtn_f32 v2, v2, v1
 ; GFX11-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: .LBB28_2:
+; GFX11-NEXT: .LBB28_4:
 ; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s4
 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
 ; GFX11-NEXT: s_mov_b32 s6, exec_lo
-; GFX11-NEXT: v_readfirstlane_b32 s4, v1
-; GFX11-NEXT: v_mbcnt_lo_u32_b32 v2, s6, 0
+; GFX11-NEXT: v_readfirstlane_b32 s4, v2
+; GFX11-NEXT: v_mbcnt_lo_u32_b32 v1, s6, 0
 ; GFX11-NEXT: s_mov_b32 s5, exec_lo
-; GFX11-NEXT: v_cmpx_eq_u32_e32 0, v2
-; GFX11-NEXT: s_cbranch_execz .LBB28_4
-; GFX11-NEXT: ; %bb.3:
+; GFX11-NEXT: v_cmpx_eq_u32_e32 0, v1
+; GFX11-NEXT: s_cbranch_execz .LBB28_6
+; GFX11-NEXT: ; %bb.5:
 ; GFX11-NEXT: s_bcnt1_i32_b32 s6, s6
 ; GFX11-NEXT: s_lshl_b32 s3, s3, 4
 ; GFX11-NEXT: v_cvt_f32_ubyte0_e32 v1, s6
@@ -7183,19 +7222,16 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
 ; GFX11-NEXT: ds_add_f32 v2, v1
 ; GFX11-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: .LBB28_4:
+; GFX11-NEXT: .LBB28_6:
 ; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
+; GFX11-NEXT: v_add_f32_e32 v2, s4, v0
 ; GFX11-NEXT: v_bfrev_b32_e32 v1, 1
 ; GFX11-NEXT: s_mov_b32 s3, exec_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
-; GFX11-NEXT: v_add_f32_e32 v2, s4, v0
 ; GFX11-NEXT: ; implicit-def: $vgpr0
-; GFX11-NEXT: .LBB28_5: ; %ComputeLoop
+; GFX11-NEXT: .LBB28_7: ; %ComputeLoop1
 ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT: s_ctz_i32_b32 s4, s3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX11-NEXT: v_readfirstlane_b32 s5, v1
 ; GFX11-NEXT: v_readlane_b32 s6, v2, s4
 ; GFX11-NEXT: s_lshl_b32 s7, 1, s4
@@ -7205,21 +7241,21 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
 ; GFX11-NEXT: v_add_f32_e32 v1, s6, v1
 ; GFX11-NEXT: s_cmp_lg_u32 s3, 0
-; GFX11-NEXT: s_cbranch_scc1 .LBB28_5
-; GFX11-NEXT: ; %bb.6: ; %ComputeEnd
+; GFX11-NEXT: s_cbranch_scc1 .LBB28_7
+; GFX11-NEXT: ; %bb.8: ; %ComputeEnd2
 ; GFX11-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
 ; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2
 ; GFX11-NEXT: ; implicit-def: $vgpr2
 ; GFX11-NEXT: s_and_saveexec_b32 s3, vcc_lo
 ; GFX11-NEXT: s_xor_b32 s3, exec_lo, s3
-; GFX11-NEXT: s_cbranch_execz .LBB28_8
-; GFX11-NEXT: ; %bb.7:
+; GFX11-NEXT: s_cbranch_execz .LBB28_10
+; GFX11-NEXT: ; %bb.9:
 ; GFX11-NEXT: v_mov_b32_e32 v2, s2
 ; GFX11-NEXT: ds_add_rtn_f32 v2, v2, v1
 ; GFX11-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: .LBB28_8:
+; GFX11-NEXT: .LBB28_10:
 ; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s3
 ; GFX11-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
 ; GFX11-NEXT: v_readfirstlane_b32 s2, v2
@@ -7233,34 +7269,47 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
 ;
 ; GFX10-LABEL: local_ds_fadd:
 ; GFX10: ; %bb.0:
+; GFX10-NEXT: v_bfrev_b32_e32 v1, 1
+; GFX10-NEXT: v_mov_b32_e32 v2, 0x42280000
+; GFX10-NEXT: s_mov_b32 s2, exec_lo
+; GFX10-NEXT: ; implicit-def: $vgpr0
+; GFX10-NEXT: .LBB28_1: ; %ComputeLoop
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_ff1_i32_b32 s3, s2
+; GFX10-NEXT: v_readfirstlane_b32 s4, v1
+; GFX10-NEXT: v_readlane_b32 s5, v2, s3
+; GFX10-NEXT: s_lshl_b32 s6, 1, s3
+; GFX10-NEXT: s_andn2_b32 s2, s2, s6
+; GFX10-NEXT: v_writelane_b32 v0, s4, s3
+; GFX10-NEXT: v_add_f32_e32 v1, s5, v1
+; GFX10-NEXT: s_cmp_lg_u32 s2, 0
+; GFX10-NEXT: s_cbranch_scc1 .LBB28_1
+; GFX10-NEXT: ; %bb.2: ; %ComputeEnd
 ; GFX10-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x8
-; GFX10-NEXT: s_mov_b32 s5, exec_lo
-; GFX10-NEXT: ; implicit-def: $vgpr1
-; GFX10-NEXT: v_mbcnt_lo_u32_b32 v0, s5, 0
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX10-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2
+; GFX10-NEXT: ; implicit-def: $vgpr2
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT: s_add_i32 s3, s3, 4
 ; GFX10-NEXT: s_and_saveexec_b32 s4, vcc_lo
-; GFX10-NEXT: s_cbranch_execz .LBB28_2
-; GFX10-NEXT: ; %bb.1:
-; GFX10-NEXT: s_bcnt1_i32_b32 s5, s5
-; GFX10-NEXT: v_cvt_f32_ubyte0_e32 v1, s5
+; GFX10-NEXT: s_xor_b32 s4, exec_lo, s4
+; GFX10-NEXT: s_cbranch_execz .LBB28_4
+; GFX10-NEXT: ; %bb.3:
 ; GFX10-NEXT: s_lshl_b32 s5, s3, 3
 ; GFX10-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
-; GFX10-NEXT: ds_add_rtn_f32 v1, v2, v1
+; GFX10-NEXT: ds_add_rtn_f32 v2, v2, v1
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: .LBB28_2:
+; GFX10-NEXT: .LBB28_4:
 ; GFX10-NEXT: s_waitcnt_depctr 0xffe3
 ; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
 ; GFX10-NEXT: s_mov_b32 s6, exec_lo
-; GFX10-NEXT: v_readfirstlane_b32 s4, v1
-; GFX10-NEXT: v_mbcnt_lo_u32_b32 v2, s6, 0
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2
+; GFX10-NEXT: v_readfirstlane_b32 s4, v2
+; GFX10-NEXT: v_mbcnt_lo_u32_b32 v1, s6, 0
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1
 ; GFX10-NEXT: s_and_saveexec_b32 s5, vcc_lo
-; GFX10-NEXT: s_cbranch_execz .LBB28_4
-; GFX10-NEXT: ; %bb.3:
+; GFX10-NEXT: s_cbranch_execz .LBB28_6
+; GFX10-NEXT: ; %bb.5:
 ; GFX10-NEXT: s_bcnt1_i32_b32 s6, s6
 ; GFX10-NEXT: s_lshl_b32 s3, s3, 4
 ; GFX10-NEXT: v_cvt_f32_ubyte0_e32 v1, s6
@@ -7270,16 +7319,14 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
 ; GFX10-NEXT: ds_add_f32 v2, v1
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: .LBB28_4:
+; GFX10-NEXT: .LBB28_6:
 ; GFX10-NEXT: s_waitcnt_depctr 0xffe3
 ; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX10-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
+; GFX10-NEXT: v_add_f32_e32 v2, s4, v0
 ; GFX10-NEXT: v_bfrev_b32_e32 v1, 1
 ; GFX10-NEXT: s_mov_b32 s3, exec_lo
-; GFX10-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
-; GFX10-NEXT: v_add_f32_e32 v2, s4, v0
 ; GFX10-NEXT: ; implicit-def: $vgpr0
-; GFX10-NEXT: .LBB28_5: ; %ComputeLoop
+; GFX10-NEXT: .LBB28_7: ; %ComputeLoop1
 ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX10-NEXT: s_ff1_i32_b32 s4, s3
 ; GFX10-NEXT: v_readfirstlane_b32 s5, v1
@@ -7289,21 +7336,21 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
 ; GFX10-NEXT: v_writelane_b32 v0, s5, s4
 ; GFX10-NEXT: v_add_f32_e32 v1, s6, v1
 ; GFX10-NEXT: s_cmp_lg_u32 s3, 0
-; GFX10-NEXT: s_cbranch_scc1 .LBB28_5
-; GFX10-NEXT: ; %bb.6: ; %ComputeEnd
+; GFX10-NEXT: s_cbranch_scc1 .LBB28_7
+; GFX10-NEXT: ; %bb.8: ; %ComputeEnd2
 ; GFX10-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
 ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2
 ; GFX10-NEXT: ; implicit-def: $vgpr2
 ; GFX10-NEXT: s_and_saveexec_b32 s3, vcc_lo
 ; GFX10-NEXT: s_xor_b32 s3, exec_lo, s3
-; GFX10-NEXT: s_cbranch_execz .LBB28_8
-; GFX10-NEXT: ; %bb.7:
+; GFX10-NEXT: s_cbranch_execz .LBB28_10
+; GFX10-NEXT: ; %bb.9:
 ; GFX10-NEXT: v_mov_b32_e32 v2, s2
 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
 ; GFX10-NEXT: ds_add_rtn_f32 v2, v2, v1
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: .LBB28_8:
+; GFX10-NEXT: .LBB28_10:
 ; GFX10-NEXT: s_waitcnt_depctr 0xffe3
 ; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s3
 ; GFX10-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
@@ -7316,34 +7363,48 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
 ;
 ; GFX90A-LABEL: local_ds_fadd:
 ; GFX90A: ; %bb.0:
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: v_bfrev_b32_e32 v1, 1
+; GFX90A-NEXT: v_mov_b32_e32 v2, 0x42280000
+; GFX90A-NEXT: ; implicit-def: $vgpr0
+; GFX90A-NEXT: .LBB28_1: ; %ComputeLoop
+; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX90A-NEXT: s_ff1_i32_b64 s6, s[2:3]
+; GFX90A-NEXT: s_lshl_b64 s[4:5], 1, s6
+; GFX90A-NEXT: v_readfirstlane_b32 s7, v1
+; GFX90A-NEXT: v_readlane_b32 s8, v2, s6
+; GFX90A-NEXT: s_mov_b32 m0, s6
+; GFX90A-NEXT: s_andn2_b64 s[2:3], s[2:3], s[4:5]
+; GFX90A-NEXT: v_writelane_b32 v0, s7, m0
+; GFX90A-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX90A-NEXT: v_add_f32_e32 v1, s8, v1
+; GFX90A-NEXT: s_cbranch_scc1 .LBB28_1
+; GFX90A-NEXT: ; %bb.2: ; %ComputeEnd
 ; GFX90A-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x8
-; GFX90A-NEXT: s_mov_b64 s[4:5], exec
-; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s4, 0
-; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s5, v0
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
+; GFX90A-NEXT: ; implicit-def: $vgpr2
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX90A-NEXT: s_add_i32 s3, s3, 4
-; GFX90A-NEXT: ; implicit-def: $vgpr1
-; GFX90A-NEXT: s_and_saveexec_b64 s[6:7], vcc
-; GFX90A-NEXT: s_cbranch_execz .LBB28_2
-; GFX90A-NEXT: ; %bb.1:
-; GFX90A-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
-; GFX90A-NEXT: s_lshl_b32 s8, s3, 3
-; GFX90A-NEXT: v_cvt_f32_ubyte0_e32 v1, s4
-;
GFX90A-NEXT: v_mul_f32_e32 v1, 0x42280000, v1 -; GFX90A-NEXT: v_mov_b32_e32 v2, s8 -; GFX90A-NEXT: ds_add_rtn_f32 v1, v2, v1 +; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc +; GFX90A-NEXT: s_xor_b64 s[4:5], exec, s[4:5] +; GFX90A-NEXT: s_cbranch_execz .LBB28_4 +; GFX90A-NEXT: ; %bb.3: +; GFX90A-NEXT: s_lshl_b32 s6, s3, 3 +; GFX90A-NEXT: v_mov_b32_e32 v2, s6 +; GFX90A-NEXT: ds_add_rtn_f32 v2, v2, v1 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: .LBB28_2: -; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] +; GFX90A-NEXT: .LBB28_4: +; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_mov_b64 s[6:7], exec -; GFX90A-NEXT: v_readfirstlane_b32 s8, v1 ; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v1, s6, 0 ; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v1, s7, v1 +; GFX90A-NEXT: v_readfirstlane_b32 s8, v2 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 ; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc -; GFX90A-NEXT: s_cbranch_execz .LBB28_4 -; GFX90A-NEXT: ; %bb.3: +; GFX90A-NEXT: s_cbranch_execz .LBB28_6 +; GFX90A-NEXT: ; %bb.5: ; GFX90A-NEXT: s_bcnt1_i32_b64 s6, s[6:7] ; GFX90A-NEXT: v_cvt_f32_ubyte0_e32 v1, s6 ; GFX90A-NEXT: s_lshl_b32 s3, s3, 4 @@ -7351,15 +7412,13 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3) ; GFX90A-NEXT: v_mov_b32_e32 v2, s3 ; GFX90A-NEXT: ds_add_f32 v2, v1 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: .LBB28_4: +; GFX90A-NEXT: .LBB28_6: ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] -; GFX90A-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 -; GFX90A-NEXT: v_mul_f32_e32 v0, 0x42280000, v0 ; GFX90A-NEXT: s_mov_b64 s[4:5], exec ; GFX90A-NEXT: v_add_f32_e32 v2, s8, v0 ; GFX90A-NEXT: v_bfrev_b32_e32 v1, 1 ; GFX90A-NEXT: ; implicit-def: $vgpr0 -; GFX90A-NEXT: .LBB28_5: ; %ComputeLoop +; GFX90A-NEXT: .LBB28_7: ; %ComputeLoop1 ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_ff1_i32_b64 s3, s[4:5] ; GFX90A-NEXT: s_lshl_b64 s[6:7], 1, s3 @@ -7370,20 +7429,20 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3) ; GFX90A-NEXT: v_writelane_b32 v0, s8, m0 ; GFX90A-NEXT: s_cmp_lg_u64 s[4:5], 0 ; GFX90A-NEXT: v_add_f32_e32 v1, s9, v1 -; GFX90A-NEXT: s_cbranch_scc1 .LBB28_5 -; GFX90A-NEXT: ; %bb.6: ; %ComputeEnd +; GFX90A-NEXT: s_cbranch_scc1 .LBB28_7 +; GFX90A-NEXT: ; %bb.8: ; %ComputeEnd2 ; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0 ; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2 ; GFX90A-NEXT: ; implicit-def: $vgpr2 ; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc ; GFX90A-NEXT: s_xor_b64 s[4:5], exec, s[4:5] -; GFX90A-NEXT: s_cbranch_execz .LBB28_8 -; GFX90A-NEXT: ; %bb.7: +; GFX90A-NEXT: s_cbranch_execz .LBB28_10 +; GFX90A-NEXT: ; %bb.9: ; GFX90A-NEXT: v_mov_b32_e32 v2, s2 ; GFX90A-NEXT: ds_add_rtn_f32 v2, v2, v1 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: .LBB28_8: +; GFX90A-NEXT: .LBB28_10: ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0 ; GFX90A-NEXT: v_readfirstlane_b32 s2, v2 @@ -7395,34 +7454,48 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3) ; ; GFX908-LABEL: local_ds_fadd: ; GFX908: ; %bb.0: +; GFX908-NEXT: s_mov_b64 s[2:3], exec +; GFX908-NEXT: v_bfrev_b32_e32 v1, 1 +; GFX908-NEXT: v_mov_b32_e32 v2, 0x42280000 +; GFX908-NEXT: ; implicit-def: $vgpr0 +; GFX908-NEXT: .LBB28_1: ; %ComputeLoop +; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX908-NEXT: s_ff1_i32_b64 s6, s[2:3] +; GFX908-NEXT: s_lshl_b64 s[4:5], 1, s6 +; GFX908-NEXT: v_readfirstlane_b32 s7, v1 +; GFX908-NEXT: v_readlane_b32 
s8, v2, s6 +; GFX908-NEXT: s_mov_b32 m0, s6 +; GFX908-NEXT: s_andn2_b64 s[2:3], s[2:3], s[4:5] +; GFX908-NEXT: v_writelane_b32 v0, s7, m0 +; GFX908-NEXT: s_cmp_lg_u64 s[2:3], 0 +; GFX908-NEXT: v_add_f32_e32 v1, s8, v1 +; GFX908-NEXT: s_cbranch_scc1 .LBB28_1 +; GFX908-NEXT: ; %bb.2: ; %ComputeEnd ; GFX908-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x8 -; GFX908-NEXT: s_mov_b64 s[4:5], exec -; GFX908-NEXT: v_mbcnt_lo_u32_b32 v0, s4, 0 -; GFX908-NEXT: v_mbcnt_hi_u32_b32 v0, s5, v0 -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; GFX908-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0 +; GFX908-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2 +; GFX908-NEXT: ; implicit-def: $vgpr2 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) ; GFX908-NEXT: s_add_i32 s3, s3, 4 -; GFX908-NEXT: ; implicit-def: $vgpr1 -; GFX908-NEXT: s_and_saveexec_b64 s[6:7], vcc -; GFX908-NEXT: s_cbranch_execz .LBB28_2 -; GFX908-NEXT: ; %bb.1: -; GFX908-NEXT: s_bcnt1_i32_b64 s4, s[4:5] -; GFX908-NEXT: s_lshl_b32 s8, s3, 3 -; GFX908-NEXT: v_cvt_f32_ubyte0_e32 v1, s4 -; GFX908-NEXT: v_mul_f32_e32 v1, 0x42280000, v1 -; GFX908-NEXT: v_mov_b32_e32 v2, s8 -; GFX908-NEXT: ds_add_rtn_f32 v1, v2, v1 +; GFX908-NEXT: s_and_saveexec_b64 s[4:5], vcc +; GFX908-NEXT: s_xor_b64 s[4:5], exec, s[4:5] +; GFX908-NEXT: s_cbranch_execz .LBB28_4 +; GFX908-NEXT: ; %bb.3: +; GFX908-NEXT: s_lshl_b32 s6, s3, 3 +; GFX908-NEXT: v_mov_b32_e32 v2, s6 +; GFX908-NEXT: ds_add_rtn_f32 v2, v2, v1 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: .LBB28_2: -; GFX908-NEXT: s_or_b64 exec, exec, s[6:7] +; GFX908-NEXT: .LBB28_4: +; GFX908-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_mov_b64 s[6:7], exec -; GFX908-NEXT: v_readfirstlane_b32 s8, v1 ; GFX908-NEXT: v_mbcnt_lo_u32_b32 v1, s6, 0 ; GFX908-NEXT: v_mbcnt_hi_u32_b32 v1, s7, v1 +; GFX908-NEXT: v_readfirstlane_b32 s8, v2 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], vcc -; GFX908-NEXT: s_cbranch_execz .LBB28_4 -; GFX908-NEXT: ; %bb.3: +; GFX908-NEXT: s_cbranch_execz .LBB28_6 +; GFX908-NEXT: ; %bb.5: ; GFX908-NEXT: s_bcnt1_i32_b64 s6, s[6:7] ; GFX908-NEXT: v_cvt_f32_ubyte0_e32 v1, s6 ; GFX908-NEXT: s_lshl_b32 s3, s3, 4 @@ -7430,15 +7503,13 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3) ; GFX908-NEXT: v_mov_b32_e32 v2, s3 ; GFX908-NEXT: ds_add_f32 v2, v1 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: .LBB28_4: +; GFX908-NEXT: .LBB28_6: ; GFX908-NEXT: s_or_b64 exec, exec, s[4:5] -; GFX908-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 -; GFX908-NEXT: v_mul_f32_e32 v0, 0x42280000, v0 ; GFX908-NEXT: s_mov_b64 s[4:5], exec ; GFX908-NEXT: v_add_f32_e32 v2, s8, v0 ; GFX908-NEXT: v_bfrev_b32_e32 v1, 1 ; GFX908-NEXT: ; implicit-def: $vgpr0 -; GFX908-NEXT: .LBB28_5: ; %ComputeLoop +; GFX908-NEXT: .LBB28_7: ; %ComputeLoop1 ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_ff1_i32_b64 s3, s[4:5] ; GFX908-NEXT: s_lshl_b64 s[6:7], 1, s3 @@ -7449,20 +7520,20 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3) ; GFX908-NEXT: v_writelane_b32 v0, s8, m0 ; GFX908-NEXT: s_cmp_lg_u64 s[4:5], 0 ; GFX908-NEXT: v_add_f32_e32 v1, s9, v1 -; GFX908-NEXT: s_cbranch_scc1 .LBB28_5 -; GFX908-NEXT: ; %bb.6: ; %ComputeEnd +; GFX908-NEXT: s_cbranch_scc1 .LBB28_7 +; GFX908-NEXT: ; %bb.8: ; %ComputeEnd2 ; GFX908-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0 ; GFX908-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2 ; GFX908-NEXT: ; implicit-def: $vgpr2 ; GFX908-NEXT: s_and_saveexec_b64 
s[4:5], vcc ; GFX908-NEXT: s_xor_b64 s[4:5], exec, s[4:5] -; GFX908-NEXT: s_cbranch_execz .LBB28_8 -; GFX908-NEXT: ; %bb.7: +; GFX908-NEXT: s_cbranch_execz .LBB28_10 +; GFX908-NEXT: ; %bb.9: ; GFX908-NEXT: v_mov_b32_e32 v2, s2 ; GFX908-NEXT: ds_add_rtn_f32 v2, v2, v1 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: .LBB28_8: +; GFX908-NEXT: .LBB28_10: ; GFX908-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0 ; GFX908-NEXT: v_readfirstlane_b32 s2, v2 @@ -7474,35 +7545,49 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3) ; ; GFX8-LABEL: local_ds_fadd: ; GFX8: ; %bb.0: +; GFX8-NEXT: s_mov_b64 s[2:3], exec +; GFX8-NEXT: v_bfrev_b32_e32 v1, 1 +; GFX8-NEXT: v_mov_b32_e32 v2, 0x42280000 +; GFX8-NEXT: ; implicit-def: $vgpr0 +; GFX8-NEXT: .LBB28_1: ; %ComputeLoop +; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX8-NEXT: s_ff1_i32_b64 s6, s[2:3] +; GFX8-NEXT: s_lshl_b64 s[4:5], 1, s6 +; GFX8-NEXT: v_readfirstlane_b32 s7, v1 +; GFX8-NEXT: v_readlane_b32 s8, v2, s6 +; GFX8-NEXT: s_mov_b32 m0, s6 +; GFX8-NEXT: s_andn2_b64 s[2:3], s[2:3], s[4:5] +; GFX8-NEXT: v_writelane_b32 v0, s7, m0 +; GFX8-NEXT: s_cmp_lg_u64 s[2:3], 0 +; GFX8-NEXT: v_add_f32_e32 v1, s8, v1 +; GFX8-NEXT: s_cbranch_scc1 .LBB28_1 +; GFX8-NEXT: ; %bb.2: ; %ComputeEnd ; GFX8-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x8 -; GFX8-NEXT: s_mov_b64 s[4:5], exec -; GFX8-NEXT: v_mbcnt_lo_u32_b32 v0, s4, 0 -; GFX8-NEXT: v_mbcnt_hi_u32_b32 v0, s5, v0 -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; GFX8-NEXT: v_mbcnt_lo_u32_b32 v3, exec_lo, 0 +; GFX8-NEXT: v_mbcnt_hi_u32_b32 v3, exec_hi, v3 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3 +; GFX8-NEXT: ; implicit-def: $vgpr2 +; GFX8-NEXT: s_mov_b32 m0, -1 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) ; GFX8-NEXT: s_add_i32 s3, s3, 4 -; GFX8-NEXT: ; implicit-def: $vgpr1 -; GFX8-NEXT: s_mov_b32 m0, -1 -; GFX8-NEXT: s_and_saveexec_b64 s[6:7], vcc -; GFX8-NEXT: s_cbranch_execz .LBB28_2 -; GFX8-NEXT: ; %bb.1: -; GFX8-NEXT: s_bcnt1_i32_b64 s4, s[4:5] -; GFX8-NEXT: s_lshl_b32 s8, s3, 3 -; GFX8-NEXT: v_cvt_f32_ubyte0_e32 v1, s4 -; GFX8-NEXT: v_mul_f32_e32 v1, 0x42280000, v1 -; GFX8-NEXT: v_mov_b32_e32 v2, s8 -; GFX8-NEXT: ds_add_rtn_f32 v1, v2, v1 +; GFX8-NEXT: s_and_saveexec_b64 s[4:5], vcc +; GFX8-NEXT: s_xor_b64 s[4:5], exec, s[4:5] +; GFX8-NEXT: s_cbranch_execz .LBB28_4 +; GFX8-NEXT: ; %bb.3: +; GFX8-NEXT: s_lshl_b32 s6, s3, 3 +; GFX8-NEXT: v_mov_b32_e32 v2, s6 +; GFX8-NEXT: ds_add_rtn_f32 v2, v2, v1 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: .LBB28_2: -; GFX8-NEXT: s_or_b64 exec, exec, s[6:7] +; GFX8-NEXT: .LBB28_4: +; GFX8-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_mov_b64 s[6:7], exec -; GFX8-NEXT: v_readfirstlane_b32 s8, v1 ; GFX8-NEXT: v_mbcnt_lo_u32_b32 v1, s6, 0 ; GFX8-NEXT: v_mbcnt_hi_u32_b32 v1, s7, v1 +; GFX8-NEXT: v_readfirstlane_b32 s8, v2 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], vcc -; GFX8-NEXT: s_cbranch_execz .LBB28_4 -; GFX8-NEXT: ; %bb.3: +; GFX8-NEXT: s_cbranch_execz .LBB28_6 +; GFX8-NEXT: ; %bb.5: ; GFX8-NEXT: s_bcnt1_i32_b64 s6, s[6:7] ; GFX8-NEXT: v_cvt_f32_ubyte0_e32 v1, s6 ; GFX8-NEXT: s_lshl_b32 s3, s3, 4 @@ -7510,15 +7595,13 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3) ; GFX8-NEXT: v_mov_b32_e32 v2, s3 ; GFX8-NEXT: ds_add_f32 v2, v1 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: .LBB28_4: +; GFX8-NEXT: .LBB28_6: ; GFX8-NEXT: s_or_b64 exec, exec, s[4:5] -; GFX8-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 -; GFX8-NEXT: v_mul_f32_e32 v0, 0x42280000, 
v0 ; GFX8-NEXT: s_mov_b64 s[4:5], exec ; GFX8-NEXT: v_add_f32_e32 v2, s8, v0 ; GFX8-NEXT: v_bfrev_b32_e32 v1, 1 ; GFX8-NEXT: ; implicit-def: $vgpr0 -; GFX8-NEXT: .LBB28_5: ; %ComputeLoop +; GFX8-NEXT: .LBB28_7: ; %ComputeLoop1 ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_ff1_i32_b64 s3, s[4:5] ; GFX8-NEXT: s_lshl_b64 s[6:7], 1, s3 @@ -7529,21 +7612,21 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3) ; GFX8-NEXT: v_writelane_b32 v0, s8, m0 ; GFX8-NEXT: s_cmp_lg_u64 s[4:5], 0 ; GFX8-NEXT: v_add_f32_e32 v1, s9, v1 -; GFX8-NEXT: s_cbranch_scc1 .LBB28_5 -; GFX8-NEXT: ; %bb.6: ; %ComputeEnd +; GFX8-NEXT: s_cbranch_scc1 .LBB28_7 +; GFX8-NEXT: ; %bb.8: ; %ComputeEnd2 ; GFX8-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0 ; GFX8-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2 ; GFX8-NEXT: ; implicit-def: $vgpr2 ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], vcc ; GFX8-NEXT: s_xor_b64 s[4:5], exec, s[4:5] -; GFX8-NEXT: s_cbranch_execz .LBB28_8 -; GFX8-NEXT: ; %bb.7: +; GFX8-NEXT: s_cbranch_execz .LBB28_10 +; GFX8-NEXT: ; %bb.9: ; GFX8-NEXT: v_mov_b32_e32 v2, s2 ; GFX8-NEXT: s_mov_b32 m0, -1 ; GFX8-NEXT: ds_add_rtn_f32 v2, v2, v1 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: .LBB28_8: +; GFX8-NEXT: .LBB28_10: ; GFX8-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0 ; GFX8-NEXT: v_readfirstlane_b32 s2, v2 @@ -7557,47 +7640,35 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3) ; GFX7-LABEL: local_ds_fadd: ; GFX7: ; %bb.0: ; GFX7-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x2 -; GFX7-NEXT: s_mov_b64 s[6:7], exec -; GFX7-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s6, 0 -; GFX7-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s7, v0 -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; GFX7-NEXT: s_mov_b32 m0, -1 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: s_lshl_b32 s4, s3, 3 +; GFX7-NEXT: v_mov_b32_e32 v0, s4 +; GFX7-NEXT: ds_read_b32 v0, v0 offset:32 ; GFX7-NEXT: s_add_i32 s3, s3, 4 -; GFX7-NEXT: ; implicit-def: $vgpr1 -; GFX7-NEXT: s_mov_b32 m0, -1 -; GFX7-NEXT: s_and_saveexec_b64 s[4:5], vcc -; GFX7-NEXT: s_cbranch_execz .LBB28_4 -; GFX7-NEXT: ; %bb.1: -; GFX7-NEXT: s_lshl_b32 s8, s3, 3 -; GFX7-NEXT: v_mov_b32_e32 v2, s8 -; GFX7-NEXT: ds_read_b32 v1, v2 -; GFX7-NEXT: s_bcnt1_i32_b64 s6, s[6:7] -; GFX7-NEXT: v_cvt_f32_ubyte0_e32 v3, s6 -; GFX7-NEXT: v_mul_f32_e32 v3, 0x42280000, v3 -; GFX7-NEXT: s_mov_b64 s[6:7], 0 -; GFX7-NEXT: .LBB28_2: ; %atomicrmw.start +; GFX7-NEXT: s_lshl_b32 s6, s3, 3 +; GFX7-NEXT: s_mov_b64 s[4:5], 0 +; GFX7-NEXT: v_mov_b32_e32 v1, s6 +; GFX7-NEXT: .LBB28_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_mov_b32_e32 v4, v1 -; GFX7-NEXT: v_add_f32_e32 v1, v4, v3 -; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v2, v4, v1 +; GFX7-NEXT: v_mov_b32_e32 v2, v0 +; GFX7-NEXT: v_add_f32_e32 v0, 0x42280000, v2 +; GFX7-NEXT: ds_cmpst_rtn_b32 v0, v1, v2, v0 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v4 -; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7] -; GFX7-NEXT: s_cbranch_execnz .LBB28_2 -; GFX7-NEXT: ; %bb.3: ; %Flow18 -; GFX7-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX7-NEXT: .LBB28_4: ; %Flow19 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2 +; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5] +; GFX7-NEXT: s_cbranch_execnz .LBB28_1 +; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: 
s_mov_b64 s[6:7], exec -; GFX7-NEXT: v_readfirstlane_b32 s8, v1 ; GFX7-NEXT: v_mbcnt_lo_u32_b32_e64 v1, s6, 0 ; GFX7-NEXT: v_mbcnt_hi_u32_b32_e32 v1, s7, v1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 ; GFX7-NEXT: s_and_saveexec_b64 s[4:5], vcc -; GFX7-NEXT: s_cbranch_execz .LBB28_7 -; GFX7-NEXT: ; %bb.5: +; GFX7-NEXT: s_cbranch_execz .LBB28_5 +; GFX7-NEXT: ; %bb.3: ; GFX7-NEXT: s_lshl_b32 s3, s3, 4 ; GFX7-NEXT: v_mov_b32_e32 v1, s3 ; GFX7-NEXT: ds_read_b32 v3, v1 @@ -7605,7 +7676,7 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3) ; GFX7-NEXT: v_cvt_f32_ubyte0_e32 v2, s3 ; GFX7-NEXT: v_mul_f32_e32 v2, 0x42280000, v2 ; GFX7-NEXT: s_mov_b64 s[6:7], 0 -; GFX7-NEXT: .LBB28_6: ; %atomicrmw.start2 +; GFX7-NEXT: .LBB28_4: ; %atomicrmw.start2 ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) ; GFX7-NEXT: v_add_f32_e32 v4, v3, v2 @@ -7615,16 +7686,13 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3) ; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7] ; GFX7-NEXT: v_mov_b32_e32 v3, v4 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7] -; GFX7-NEXT: s_cbranch_execnz .LBB28_6 -; GFX7-NEXT: .LBB28_7: ; %Flow17 +; GFX7-NEXT: s_cbranch_execnz .LBB28_4 +; GFX7-NEXT: .LBB28_5: ; %Flow17 ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: v_mov_b32_e32 v2, s2 ; GFX7-NEXT: ds_read_b32 v1, v2 -; GFX7-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 -; GFX7-NEXT: v_mul_f32_e32 v0, 0x42280000, v0 -; GFX7-NEXT: v_add_f32_e32 v0, s8, v0 ; GFX7-NEXT: s_mov_b64 s[2:3], 0 -; GFX7-NEXT: .LBB28_8: ; %atomicrmw.start8 +; GFX7-NEXT: .LBB28_6: ; %atomicrmw.start8 ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) ; GFX7-NEXT: v_mov_b32_e32 v3, v1 @@ -7634,8 +7702,8 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3) ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v3 ; GFX7-NEXT: s_or_b64 s[2:3], vcc, s[2:3] ; GFX7-NEXT: s_andn2_b64 exec, exec, s[2:3] -; GFX7-NEXT: s_cbranch_execnz .LBB28_8 -; GFX7-NEXT: ; %bb.9: ; %atomicrmw.end7 +; GFX7-NEXT: s_cbranch_execnz .LBB28_6 +; GFX7-NEXT: ; %bb.7: ; %atomicrmw.end7 ; GFX7-NEXT: s_or_b64 exec, exec, s[2:3] ; GFX7-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0 ; GFX7-NEXT: s_mov_b32 s3, 0xf000 @@ -7647,47 +7715,36 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3) ; GFX6-LABEL: local_ds_fadd: ; GFX6: ; %bb.0: ; GFX6-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x2 -; GFX6-NEXT: s_mov_b64 s[6:7], exec -; GFX6-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s6, 0 -; GFX6-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s7, v0 -; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; GFX6-NEXT: s_mov_b32 m0, -1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) +; GFX6-NEXT: s_lshl_b32 s4, s3, 3 +; GFX6-NEXT: s_add_i32 s4, s4, 32 +; GFX6-NEXT: v_mov_b32_e32 v0, s4 +; GFX6-NEXT: ds_read_b32 v0, v0 ; GFX6-NEXT: s_add_i32 s3, s3, 4 -; GFX6-NEXT: ; implicit-def: $vgpr1 -; GFX6-NEXT: s_mov_b32 m0, -1 -; GFX6-NEXT: s_and_saveexec_b64 s[4:5], vcc -; GFX6-NEXT: s_cbranch_execz .LBB28_4 -; GFX6-NEXT: ; %bb.1: -; GFX6-NEXT: s_lshl_b32 s8, s3, 3 -; GFX6-NEXT: v_mov_b32_e32 v2, s8 -; GFX6-NEXT: ds_read_b32 v1, v2 -; GFX6-NEXT: s_bcnt1_i32_b64 s6, s[6:7] -; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v3, s6 -; GFX6-NEXT: v_mul_f32_e32 v3, 0x42280000, v3 -; GFX6-NEXT: s_mov_b64 s[6:7], 0 -; GFX6-NEXT: .LBB28_2: ; %atomicrmw.start +; GFX6-NEXT: s_lshl_b32 s6, s3, 3 +; GFX6-NEXT: s_mov_b64 s[4:5], 0 +; GFX6-NEXT: v_mov_b32_e32 v1, s6 +; GFX6-NEXT: .LBB28_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; 
GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v4, v1 -; GFX6-NEXT: v_add_f32_e32 v1, v4, v3 -; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v2, v4, v1 +; GFX6-NEXT: v_mov_b32_e32 v2, v0 +; GFX6-NEXT: v_add_f32_e32 v0, 0x42280000, v2 +; GFX6-NEXT: ds_cmpst_rtn_b32 v0, v1, v2, v0 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v4 -; GFX6-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX6-NEXT: s_andn2_b64 exec, exec, s[6:7] -; GFX6-NEXT: s_cbranch_execnz .LBB28_2 -; GFX6-NEXT: ; %bb.3: ; %Flow16 -; GFX6-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX6-NEXT: .LBB28_4: ; %Flow17 +; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2 +; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5] +; GFX6-NEXT: s_cbranch_execnz .LBB28_1 +; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX6-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX6-NEXT: s_mov_b64 s[6:7], exec -; GFX6-NEXT: v_readfirstlane_b32 s8, v1 ; GFX6-NEXT: v_mbcnt_lo_u32_b32_e64 v1, s6, 0 ; GFX6-NEXT: v_mbcnt_hi_u32_b32_e32 v1, s7, v1 ; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 ; GFX6-NEXT: s_and_saveexec_b64 s[4:5], vcc -; GFX6-NEXT: s_cbranch_execz .LBB28_7 -; GFX6-NEXT: ; %bb.5: +; GFX6-NEXT: s_cbranch_execz .LBB28_5 +; GFX6-NEXT: ; %bb.3: ; GFX6-NEXT: s_lshl_b32 s3, s3, 4 ; GFX6-NEXT: v_mov_b32_e32 v1, s3 ; GFX6-NEXT: ds_read_b32 v3, v1 @@ -7695,7 +7752,7 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3) ; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v2, s3 ; GFX6-NEXT: v_mul_f32_e32 v2, 0x42280000, v2 ; GFX6-NEXT: s_mov_b64 s[6:7], 0 -; GFX6-NEXT: .LBB28_6: ; %atomicrmw.start2 +; GFX6-NEXT: .LBB28_4: ; %atomicrmw.start2 ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) ; GFX6-NEXT: v_add_f32_e32 v4, v3, v2 @@ -7705,16 +7762,13 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3) ; GFX6-NEXT: s_or_b64 s[6:7], vcc, s[6:7] ; GFX6-NEXT: v_mov_b32_e32 v3, v4 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[6:7] -; GFX6-NEXT: s_cbranch_execnz .LBB28_6 -; GFX6-NEXT: .LBB28_7: ; %Flow15 +; GFX6-NEXT: s_cbranch_execnz .LBB28_4 +; GFX6-NEXT: .LBB28_5: ; %Flow15 ; GFX6-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX6-NEXT: v_mov_b32_e32 v2, s2 ; GFX6-NEXT: ds_read_b32 v1, v2 -; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 -; GFX6-NEXT: v_mul_f32_e32 v0, 0x42280000, v0 -; GFX6-NEXT: v_add_f32_e32 v0, s8, v0 ; GFX6-NEXT: s_mov_b64 s[2:3], 0 -; GFX6-NEXT: .LBB28_8: ; %atomicrmw.start8 +; GFX6-NEXT: .LBB28_6: ; %atomicrmw.start8 ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) ; GFX6-NEXT: v_mov_b32_e32 v3, v1 @@ -7724,8 +7778,8 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3) ; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v3 ; GFX6-NEXT: s_or_b64 s[2:3], vcc, s[2:3] ; GFX6-NEXT: s_andn2_b64 exec, exec, s[2:3] -; GFX6-NEXT: s_cbranch_execnz .LBB28_8 -; GFX6-NEXT: ; %bb.9: ; %atomicrmw.end7 +; GFX6-NEXT: s_cbranch_execnz .LBB28_6 +; GFX6-NEXT: ; %bb.7: ; %atomicrmw.end7 ; GFX6-NEXT: s_or_b64 exec, exec, s[2:3] ; GFX6-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0 ; GFX6-NEXT: s_mov_b32 s3, 0xf000 @@ -7748,26 +7802,40 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3) define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrspace(3) %ptrf, i32 %idx) { ; GFX12-LABEL: local_ds_fadd_one_as: ; GFX12: ; %bb.0: +; GFX12-NEXT: v_mov_b32_e32 v1, 0x42280000 +; GFX12-NEXT: s_mov_b32 s2, exec_lo +; GFX12-NEXT: s_brev_b32 s4, 1 +; GFX12-NEXT: ; implicit-def: $vgpr0 +; GFX12-NEXT: 
.LBB29_1: ; %ComputeLoop +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_ctz_i32_b32 s3, s2 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX12-NEXT: v_readlane_b32 s5, v1, s3 +; GFX12-NEXT: s_lshl_b32 s6, 1, s3 +; GFX12-NEXT: v_writelane_b32 v0, s4, s3 +; GFX12-NEXT: s_and_not1_b32 s2, s2, s6 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_cmp_lg_u32 s2, 0 +; GFX12-NEXT: s_add_f32 s4, s4, s5 +; GFX12-NEXT: s_cbranch_scc1 .LBB29_1 +; GFX12-NEXT: ; %bb.2: ; %ComputeEnd ; GFX12-NEXT: s_load_b64 s[2:3], s[0:1], 0x8 -; GFX12-NEXT: s_mov_b32 s5, exec_lo -; GFX12-NEXT: s_mov_b32 s4, exec_lo -; GFX12-NEXT: v_mbcnt_lo_u32_b32 v0, s5, 0 +; GFX12-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(SALU_CYCLE_1) +; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1 ; GFX12-NEXT: ; implicit-def: $vgpr1 ; GFX12-NEXT: s_wait_kmcnt 0x0 ; GFX12-NEXT: s_add_co_i32 s3, s3, 4 -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-NEXT: v_cmpx_eq_u32_e32 0, v0 -; GFX12-NEXT: s_cbranch_execz .LBB29_2 -; GFX12-NEXT: ; %bb.1: -; GFX12-NEXT: s_bcnt1_i32_b32 s5, s5 +; GFX12-NEXT: s_and_saveexec_b32 s5, vcc_lo +; GFX12-NEXT: s_xor_b32 s5, exec_lo, s5 +; GFX12-NEXT: s_cbranch_execz .LBB29_4 +; GFX12-NEXT: ; %bb.3: +; GFX12-NEXT: s_lshl_b32 s6, s3, 3 ; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX12-NEXT: v_cvt_f32_ubyte0_e32 v1, s5 -; GFX12-NEXT: s_lshl_b32 s5, s3, 3 -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX12-NEXT: v_dual_mov_b32 v2, s5 :: v_dual_mul_f32 v1, 0x42280000, v1 -; GFX12-NEXT: ds_add_rtn_f32 v1, v2, v1 -; GFX12-NEXT: .LBB29_2: -; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v1, s6 +; GFX12-NEXT: ds_add_rtn_f32 v1, v1, v2 +; GFX12-NEXT: .LBB29_4: +; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s5 ; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1) ; GFX12-NEXT: s_mov_b32 s6, exec_lo ; GFX12-NEXT: s_wait_dscnt 0x0 @@ -7775,24 +7843,21 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs ; GFX12-NEXT: v_mbcnt_lo_u32_b32 v2, s6, 0 ; GFX12-NEXT: s_mov_b32 s4, exec_lo ; GFX12-NEXT: v_cmpx_eq_u32_e32 0, v2 -; GFX12-NEXT: s_cbranch_execz .LBB29_4 -; GFX12-NEXT: ; %bb.3: +; GFX12-NEXT: s_cbranch_execz .LBB29_6 +; GFX12-NEXT: ; %bb.5: ; GFX12-NEXT: s_bcnt1_i32_b32 s6, s6 ; GFX12-NEXT: s_lshl_b32 s3, s3, 4 ; GFX12-NEXT: v_cvt_f32_ubyte0_e32 v1, s6 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX12-NEXT: v_dual_mov_b32 v2, s3 :: v_dual_mul_f32 v1, 0x42280000, v1 ; GFX12-NEXT: ds_add_f32 v2, v1 -; GFX12-NEXT: .LBB29_4: +; GFX12-NEXT: .LBB29_6: ; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s4 -; GFX12-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 +; GFX12-NEXT: v_add_f32_e32 v1, s5, v0 ; GFX12-NEXT: s_mov_b32 s4, exec_lo ; GFX12-NEXT: s_brev_b32 s3, 1 -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-NEXT: v_mul_f32_e32 v0, 0x42280000, v0 -; GFX12-NEXT: v_add_f32_e32 v1, s5, v0 ; GFX12-NEXT: ; implicit-def: $vgpr0 -; GFX12-NEXT: .LBB29_5: ; %ComputeLoop +; GFX12-NEXT: .LBB29_7: ; %ComputeLoop1 ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: s_ctz_i32_b32 s5, s4 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) @@ -7803,19 +7868,19 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs ; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX12-NEXT: 
s_cmp_lg_u32 s4, 0 ; GFX12-NEXT: s_add_f32 s3, s3, s6 -; GFX12-NEXT: s_cbranch_scc1 .LBB29_5 -; GFX12-NEXT: ; %bb.6: ; %ComputeEnd +; GFX12-NEXT: s_cbranch_scc1 .LBB29_7 +; GFX12-NEXT: ; %bb.8: ; %ComputeEnd2 ; GFX12-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1 ; GFX12-NEXT: ; implicit-def: $vgpr1 ; GFX12-NEXT: s_and_saveexec_b32 s4, vcc_lo ; GFX12-NEXT: s_xor_b32 s4, exec_lo, s4 -; GFX12-NEXT: s_cbranch_execz .LBB29_8 -; GFX12-NEXT: ; %bb.7: +; GFX12-NEXT: s_cbranch_execz .LBB29_10 +; GFX12-NEXT: ; %bb.9: ; GFX12-NEXT: v_dual_mov_b32 v1, s2 :: v_dual_mov_b32 v2, s3 ; GFX12-NEXT: ds_add_rtn_f32 v1, v1, v2 -; GFX12-NEXT: .LBB29_8: +; GFX12-NEXT: .LBB29_10: ; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s4 ; GFX12-NEXT: s_load_b64 s[0:1], s[0:1], 0x0 ; GFX12-NEXT: s_wait_dscnt 0x0 @@ -7830,49 +7895,61 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs ; ; GFX940-LABEL: local_ds_fadd_one_as: ; GFX940: ; %bb.0: +; GFX940-NEXT: s_mov_b64 s[2:3], exec +; GFX940-NEXT: v_bfrev_b32_e32 v1, 1 +; GFX940-NEXT: v_mov_b32_e32 v2, 0x42280000 +; GFX940-NEXT: ; implicit-def: $vgpr0 +; GFX940-NEXT: .LBB29_1: ; %ComputeLoop +; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX940-NEXT: s_ff1_i32_b64 s6, s[2:3] +; GFX940-NEXT: s_lshl_b64 s[4:5], 1, s6 +; GFX940-NEXT: v_readfirstlane_b32 s7, v1 +; GFX940-NEXT: v_readlane_b32 s8, v2, s6 +; GFX940-NEXT: s_mov_b32 m0, s6 +; GFX940-NEXT: s_andn2_b64 s[2:3], s[2:3], s[4:5] +; GFX940-NEXT: v_writelane_b32 v0, s7, m0 +; GFX940-NEXT: s_cmp_lg_u64 s[2:3], 0 +; GFX940-NEXT: v_add_f32_e32 v1, s8, v1 +; GFX940-NEXT: s_cbranch_scc1 .LBB29_1 +; GFX940-NEXT: ; %bb.2: ; %ComputeEnd ; GFX940-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x8 -; GFX940-NEXT: s_mov_b64 s[4:5], exec -; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s4, 0 -; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s5, v0 -; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; GFX940-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0 +; GFX940-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2 +; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2 +; GFX940-NEXT: ; implicit-def: $vgpr2 ; GFX940-NEXT: s_waitcnt lgkmcnt(0) ; GFX940-NEXT: s_add_i32 s3, s3, 4 -; GFX940-NEXT: ; implicit-def: $vgpr1 -; GFX940-NEXT: s_and_saveexec_b64 s[6:7], vcc -; GFX940-NEXT: s_cbranch_execz .LBB29_2 -; GFX940-NEXT: ; %bb.1: -; GFX940-NEXT: s_bcnt1_i32_b64 s4, s[4:5] -; GFX940-NEXT: s_lshl_b32 s8, s3, 3 -; GFX940-NEXT: v_cvt_f32_ubyte0_e32 v1, s4 -; GFX940-NEXT: v_mul_f32_e32 v1, 0x42280000, v1 -; GFX940-NEXT: v_mov_b32_e32 v2, s8 -; GFX940-NEXT: ds_add_rtn_f32 v1, v2, v1 -; GFX940-NEXT: .LBB29_2: -; GFX940-NEXT: s_or_b64 exec, exec, s[6:7] +; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc +; GFX940-NEXT: s_xor_b64 s[4:5], exec, s[4:5] +; GFX940-NEXT: s_cbranch_execz .LBB29_4 +; GFX940-NEXT: ; %bb.3: +; GFX940-NEXT: s_lshl_b32 s6, s3, 3 +; GFX940-NEXT: v_mov_b32_e32 v2, s6 +; GFX940-NEXT: ds_add_rtn_f32 v2, v2, v1 +; GFX940-NEXT: .LBB29_4: +; GFX940-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX940-NEXT: s_mov_b64 s[6:7], exec -; GFX940-NEXT: s_waitcnt lgkmcnt(0) -; GFX940-NEXT: v_readfirstlane_b32 s8, v1 ; GFX940-NEXT: v_mbcnt_lo_u32_b32 v1, s6, 0 ; GFX940-NEXT: v_mbcnt_hi_u32_b32 v1, s7, v1 +; GFX940-NEXT: s_waitcnt lgkmcnt(0) +; GFX940-NEXT: v_readfirstlane_b32 s8, v2 ; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 ; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc -; GFX940-NEXT: s_cbranch_execz .LBB29_4 -; GFX940-NEXT: ; %bb.3: +; GFX940-NEXT: 
s_cbranch_execz .LBB29_6 +; GFX940-NEXT: ; %bb.5: ; GFX940-NEXT: s_bcnt1_i32_b64 s6, s[6:7] ; GFX940-NEXT: v_cvt_f32_ubyte0_e32 v1, s6 ; GFX940-NEXT: s_lshl_b32 s3, s3, 4 ; GFX940-NEXT: v_mul_f32_e32 v1, 0x42280000, v1 ; GFX940-NEXT: v_mov_b32_e32 v2, s3 ; GFX940-NEXT: ds_add_f32 v2, v1 -; GFX940-NEXT: .LBB29_4: +; GFX940-NEXT: .LBB29_6: ; GFX940-NEXT: s_or_b64 exec, exec, s[4:5] -; GFX940-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 -; GFX940-NEXT: v_mul_f32_e32 v0, 0x42280000, v0 ; GFX940-NEXT: s_mov_b64 s[4:5], exec ; GFX940-NEXT: v_add_f32_e32 v2, s8, v0 ; GFX940-NEXT: v_bfrev_b32_e32 v1, 1 ; GFX940-NEXT: ; implicit-def: $vgpr0 -; GFX940-NEXT: .LBB29_5: ; %ComputeLoop +; GFX940-NEXT: .LBB29_7: ; %ComputeLoop1 ; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX940-NEXT: s_ff1_i32_b64 s3, s[4:5] ; GFX940-NEXT: s_lshl_b64 s[6:7], 1, s3 @@ -7883,19 +7960,19 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs ; GFX940-NEXT: v_writelane_b32 v0, s8, m0 ; GFX940-NEXT: s_cmp_lg_u64 s[4:5], 0 ; GFX940-NEXT: v_add_f32_e32 v1, s9, v1 -; GFX940-NEXT: s_cbranch_scc1 .LBB29_5 -; GFX940-NEXT: ; %bb.6: ; %ComputeEnd +; GFX940-NEXT: s_cbranch_scc1 .LBB29_7 +; GFX940-NEXT: ; %bb.8: ; %ComputeEnd2 ; GFX940-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0 ; GFX940-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2 ; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2 ; GFX940-NEXT: ; implicit-def: $vgpr2 ; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc ; GFX940-NEXT: s_xor_b64 s[4:5], exec, s[4:5] -; GFX940-NEXT: s_cbranch_execz .LBB29_8 -; GFX940-NEXT: ; %bb.7: +; GFX940-NEXT: s_cbranch_execz .LBB29_10 +; GFX940-NEXT: ; %bb.9: ; GFX940-NEXT: v_mov_b32_e32 v2, s2 ; GFX940-NEXT: ds_add_rtn_f32 v2, v2, v1 -; GFX940-NEXT: .LBB29_8: +; GFX940-NEXT: .LBB29_10: ; GFX940-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX940-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0 ; GFX940-NEXT: s_waitcnt lgkmcnt(0) @@ -7908,54 +7985,67 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs ; ; GFX11-LABEL: local_ds_fadd_one_as: ; GFX11: ; %bb.0: +; GFX11-NEXT: v_bfrev_b32_e32 v1, 1 +; GFX11-NEXT: v_mov_b32_e32 v2, 0x42280000 +; GFX11-NEXT: s_mov_b32 s2, exec_lo +; GFX11-NEXT: ; implicit-def: $vgpr0 +; GFX11-NEXT: .LBB29_1: ; %ComputeLoop +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-NEXT: s_ctz_i32_b32 s3, s2 +; GFX11-NEXT: v_readfirstlane_b32 s4, v1 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX11-NEXT: v_readlane_b32 s5, v2, s3 +; GFX11-NEXT: s_lshl_b32 s6, 1, s3 +; GFX11-NEXT: s_and_not1_b32 s2, s2, s6 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-NEXT: v_writelane_b32 v0, s4, s3 +; GFX11-NEXT: v_add_f32_e32 v1, s5, v1 +; GFX11-NEXT: s_cmp_lg_u32 s2, 0 +; GFX11-NEXT: s_cbranch_scc1 .LBB29_1 +; GFX11-NEXT: ; %bb.2: ; %ComputeEnd ; GFX11-NEXT: s_load_b64 s[2:3], s[0:1], 0x8 -; GFX11-NEXT: s_mov_b32 s5, exec_lo -; GFX11-NEXT: s_mov_b32 s4, exec_lo -; GFX11-NEXT: v_mbcnt_lo_u32_b32 v0, s5, 0 -; GFX11-NEXT: ; implicit-def: $vgpr1 +; GFX11-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(SALU_CYCLE_1) +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2 +; GFX11-NEXT: ; implicit-def: $vgpr2 ; GFX11-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-NEXT: s_add_i32 s3, s3, 4 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-NEXT: v_cmpx_eq_u32_e32 0, v0 -; GFX11-NEXT: 
s_cbranch_execz .LBB29_2 -; GFX11-NEXT: ; %bb.1: -; GFX11-NEXT: s_bcnt1_i32_b32 s5, s5 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-NEXT: v_cvt_f32_ubyte0_e32 v1, s5 +; GFX11-NEXT: s_and_saveexec_b32 s4, vcc_lo +; GFX11-NEXT: s_xor_b32 s4, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execz .LBB29_4 +; GFX11-NEXT: ; %bb.3: ; GFX11-NEXT: s_lshl_b32 s5, s3, 3 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX11-NEXT: v_dual_mov_b32 v2, s5 :: v_dual_mul_f32 v1, 0x42280000, v1 -; GFX11-NEXT: ds_add_rtn_f32 v1, v2, v1 -; GFX11-NEXT: .LBB29_2: +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: v_mov_b32_e32 v2, s5 +; GFX11-NEXT: ds_add_rtn_f32 v2, v2, v1 +; GFX11-NEXT: .LBB29_4: ; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s4 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1) ; GFX11-NEXT: s_mov_b32 s6, exec_lo ; GFX11-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-NEXT: v_readfirstlane_b32 s4, v1 -; GFX11-NEXT: v_mbcnt_lo_u32_b32 v2, s6, 0 +; GFX11-NEXT: v_readfirstlane_b32 s4, v2 +; GFX11-NEXT: v_mbcnt_lo_u32_b32 v1, s6, 0 ; GFX11-NEXT: s_mov_b32 s5, exec_lo -; GFX11-NEXT: v_cmpx_eq_u32_e32 0, v2 -; GFX11-NEXT: s_cbranch_execz .LBB29_4 -; GFX11-NEXT: ; %bb.3: +; GFX11-NEXT: v_cmpx_eq_u32_e32 0, v1 +; GFX11-NEXT: s_cbranch_execz .LBB29_6 +; GFX11-NEXT: ; %bb.5: ; GFX11-NEXT: s_bcnt1_i32_b32 s6, s6 ; GFX11-NEXT: s_lshl_b32 s3, s3, 4 ; GFX11-NEXT: v_cvt_f32_ubyte0_e32 v1, s6 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11-NEXT: v_dual_mov_b32 v2, s3 :: v_dual_mul_f32 v1, 0x42280000, v1 ; GFX11-NEXT: ds_add_f32 v2, v1 -; GFX11-NEXT: .LBB29_4: +; GFX11-NEXT: .LBB29_6: ; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s5 -; GFX11-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 +; GFX11-NEXT: v_add_f32_e32 v2, s4, v0 ; GFX11-NEXT: v_bfrev_b32_e32 v1, 1 ; GFX11-NEXT: s_mov_b32 s3, exec_lo -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_mul_f32_e32 v0, 0x42280000, v0 -; GFX11-NEXT: v_add_f32_e32 v2, s4, v0 ; GFX11-NEXT: ; implicit-def: $vgpr0 -; GFX11-NEXT: .LBB29_5: ; %ComputeLoop +; GFX11-NEXT: .LBB29_7: ; %ComputeLoop1 ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-NEXT: s_ctz_i32_b32 s4, s3 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-NEXT: v_readfirstlane_b32 s5, v1 ; GFX11-NEXT: v_readlane_b32 s6, v2, s4 ; GFX11-NEXT: s_lshl_b32 s7, 1, s4 @@ -7965,19 +8055,19 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX11-NEXT: v_add_f32_e32 v1, s6, v1 ; GFX11-NEXT: s_cmp_lg_u32 s3, 0 -; GFX11-NEXT: s_cbranch_scc1 .LBB29_5 -; GFX11-NEXT: ; %bb.6: ; %ComputeEnd +; GFX11-NEXT: s_cbranch_scc1 .LBB29_7 +; GFX11-NEXT: ; %bb.8: ; %ComputeEnd2 ; GFX11-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2 ; GFX11-NEXT: ; implicit-def: $vgpr2 ; GFX11-NEXT: s_and_saveexec_b32 s3, vcc_lo ; GFX11-NEXT: s_xor_b32 s3, exec_lo, s3 -; GFX11-NEXT: s_cbranch_execz .LBB29_8 -; GFX11-NEXT: ; %bb.7: +; GFX11-NEXT: s_cbranch_execz .LBB29_10 +; GFX11-NEXT: ; %bb.9: ; GFX11-NEXT: v_mov_b32_e32 v2, s2 ; GFX11-NEXT: ds_add_rtn_f32 v2, v2, v1 -; GFX11-NEXT: .LBB29_8: +; GFX11-NEXT: .LBB29_10: ; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s3 ; GFX11-NEXT: s_load_b64 s[0:1], s[0:1], 0x0 ; 
GFX11-NEXT: s_waitcnt lgkmcnt(0) @@ -7991,49 +8081,60 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs ; ; GFX10-LABEL: local_ds_fadd_one_as: ; GFX10: ; %bb.0: +; GFX10-NEXT: v_bfrev_b32_e32 v1, 1 +; GFX10-NEXT: v_mov_b32_e32 v2, 0x42280000 +; GFX10-NEXT: s_mov_b32 s2, exec_lo +; GFX10-NEXT: ; implicit-def: $vgpr0 +; GFX10-NEXT: .LBB29_1: ; %ComputeLoop +; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: s_ff1_i32_b32 s3, s2 +; GFX10-NEXT: v_readfirstlane_b32 s4, v1 +; GFX10-NEXT: v_readlane_b32 s5, v2, s3 +; GFX10-NEXT: s_lshl_b32 s6, 1, s3 +; GFX10-NEXT: s_andn2_b32 s2, s2, s6 +; GFX10-NEXT: v_writelane_b32 v0, s4, s3 +; GFX10-NEXT: v_add_f32_e32 v1, s5, v1 +; GFX10-NEXT: s_cmp_lg_u32 s2, 0 +; GFX10-NEXT: s_cbranch_scc1 .LBB29_1 +; GFX10-NEXT: ; %bb.2: ; %ComputeEnd ; GFX10-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x8 -; GFX10-NEXT: s_mov_b32 s5, exec_lo -; GFX10-NEXT: ; implicit-def: $vgpr1 -; GFX10-NEXT: v_mbcnt_lo_u32_b32 v0, s5, 0 -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX10-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2 +; GFX10-NEXT: ; implicit-def: $vgpr2 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: s_add_i32 s3, s3, 4 ; GFX10-NEXT: s_and_saveexec_b32 s4, vcc_lo -; GFX10-NEXT: s_cbranch_execz .LBB29_2 -; GFX10-NEXT: ; %bb.1: -; GFX10-NEXT: s_bcnt1_i32_b32 s5, s5 -; GFX10-NEXT: v_cvt_f32_ubyte0_e32 v1, s5 +; GFX10-NEXT: s_xor_b32 s4, exec_lo, s4 +; GFX10-NEXT: s_cbranch_execz .LBB29_4 +; GFX10-NEXT: ; %bb.3: ; GFX10-NEXT: s_lshl_b32 s5, s3, 3 ; GFX10-NEXT: v_mov_b32_e32 v2, s5 -; GFX10-NEXT: v_mul_f32_e32 v1, 0x42280000, v1 -; GFX10-NEXT: ds_add_rtn_f32 v1, v2, v1 -; GFX10-NEXT: .LBB29_2: +; GFX10-NEXT: ds_add_rtn_f32 v2, v2, v1 +; GFX10-NEXT: .LBB29_4: ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 ; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_mov_b32 s6, exec_lo ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: v_readfirstlane_b32 s4, v1 -; GFX10-NEXT: v_mbcnt_lo_u32_b32 v2, s6, 0 -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2 +; GFX10-NEXT: v_readfirstlane_b32 s4, v2 +; GFX10-NEXT: v_mbcnt_lo_u32_b32 v1, s6, 0 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1 ; GFX10-NEXT: s_and_saveexec_b32 s5, vcc_lo -; GFX10-NEXT: s_cbranch_execz .LBB29_4 -; GFX10-NEXT: ; %bb.3: +; GFX10-NEXT: s_cbranch_execz .LBB29_6 +; GFX10-NEXT: ; %bb.5: ; GFX10-NEXT: s_bcnt1_i32_b32 s6, s6 ; GFX10-NEXT: s_lshl_b32 s3, s3, 4 ; GFX10-NEXT: v_cvt_f32_ubyte0_e32 v1, s6 ; GFX10-NEXT: v_mov_b32_e32 v2, s3 ; GFX10-NEXT: v_mul_f32_e32 v1, 0x42280000, v1 ; GFX10-NEXT: ds_add_f32 v2, v1 -; GFX10-NEXT: .LBB29_4: +; GFX10-NEXT: .LBB29_6: ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 ; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5 -; GFX10-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 +; GFX10-NEXT: v_add_f32_e32 v2, s4, v0 ; GFX10-NEXT: v_bfrev_b32_e32 v1, 1 ; GFX10-NEXT: s_mov_b32 s3, exec_lo -; GFX10-NEXT: v_mul_f32_e32 v0, 0x42280000, v0 -; GFX10-NEXT: v_add_f32_e32 v2, s4, v0 ; GFX10-NEXT: ; implicit-def: $vgpr0 -; GFX10-NEXT: .LBB29_5: ; %ComputeLoop +; GFX10-NEXT: .LBB29_7: ; %ComputeLoop1 ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_ff1_i32_b32 s4, s3 ; GFX10-NEXT: v_readfirstlane_b32 s5, v1 @@ -8043,18 +8144,18 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs ; GFX10-NEXT: v_writelane_b32 v0, s5, s4 ; GFX10-NEXT: v_add_f32_e32 v1, s6, v1 ; GFX10-NEXT: s_cmp_lg_u32 s3, 0 -; GFX10-NEXT: s_cbranch_scc1 .LBB29_5 -; GFX10-NEXT: ; %bb.6: ; %ComputeEnd +; GFX10-NEXT: s_cbranch_scc1 
.LBB29_7 +; GFX10-NEXT: ; %bb.8: ; %ComputeEnd2 ; GFX10-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0 ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2 ; GFX10-NEXT: ; implicit-def: $vgpr2 ; GFX10-NEXT: s_and_saveexec_b32 s3, vcc_lo ; GFX10-NEXT: s_xor_b32 s3, exec_lo, s3 -; GFX10-NEXT: s_cbranch_execz .LBB29_8 -; GFX10-NEXT: ; %bb.7: +; GFX10-NEXT: s_cbranch_execz .LBB29_10 +; GFX10-NEXT: ; %bb.9: ; GFX10-NEXT: v_mov_b32_e32 v2, s2 ; GFX10-NEXT: ds_add_rtn_f32 v2, v2, v1 -; GFX10-NEXT: .LBB29_8: +; GFX10-NEXT: .LBB29_10: ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 ; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s3 ; GFX10-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0 @@ -8068,49 +8169,61 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs ; ; GFX90A-LABEL: local_ds_fadd_one_as: ; GFX90A: ; %bb.0: +; GFX90A-NEXT: s_mov_b64 s[2:3], exec +; GFX90A-NEXT: v_bfrev_b32_e32 v1, 1 +; GFX90A-NEXT: v_mov_b32_e32 v2, 0x42280000 +; GFX90A-NEXT: ; implicit-def: $vgpr0 +; GFX90A-NEXT: .LBB29_1: ; %ComputeLoop +; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX90A-NEXT: s_ff1_i32_b64 s6, s[2:3] +; GFX90A-NEXT: s_lshl_b64 s[4:5], 1, s6 +; GFX90A-NEXT: v_readfirstlane_b32 s7, v1 +; GFX90A-NEXT: v_readlane_b32 s8, v2, s6 +; GFX90A-NEXT: s_mov_b32 m0, s6 +; GFX90A-NEXT: s_andn2_b64 s[2:3], s[2:3], s[4:5] +; GFX90A-NEXT: v_writelane_b32 v0, s7, m0 +; GFX90A-NEXT: s_cmp_lg_u64 s[2:3], 0 +; GFX90A-NEXT: v_add_f32_e32 v1, s8, v1 +; GFX90A-NEXT: s_cbranch_scc1 .LBB29_1 +; GFX90A-NEXT: ; %bb.2: ; %ComputeEnd ; GFX90A-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x8 -; GFX90A-NEXT: s_mov_b64 s[4:5], exec -; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s4, 0 -; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s5, v0 -; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0 +; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2 +; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2 +; GFX90A-NEXT: ; implicit-def: $vgpr2 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) ; GFX90A-NEXT: s_add_i32 s3, s3, 4 -; GFX90A-NEXT: ; implicit-def: $vgpr1 -; GFX90A-NEXT: s_and_saveexec_b64 s[6:7], vcc -; GFX90A-NEXT: s_cbranch_execz .LBB29_2 -; GFX90A-NEXT: ; %bb.1: -; GFX90A-NEXT: s_bcnt1_i32_b64 s4, s[4:5] -; GFX90A-NEXT: s_lshl_b32 s8, s3, 3 -; GFX90A-NEXT: v_cvt_f32_ubyte0_e32 v1, s4 -; GFX90A-NEXT: v_mul_f32_e32 v1, 0x42280000, v1 -; GFX90A-NEXT: v_mov_b32_e32 v2, s8 -; GFX90A-NEXT: ds_add_rtn_f32 v1, v2, v1 -; GFX90A-NEXT: .LBB29_2: -; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] +; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc +; GFX90A-NEXT: s_xor_b64 s[4:5], exec, s[4:5] +; GFX90A-NEXT: s_cbranch_execz .LBB29_4 +; GFX90A-NEXT: ; %bb.3: +; GFX90A-NEXT: s_lshl_b32 s6, s3, 3 +; GFX90A-NEXT: v_mov_b32_e32 v2, s6 +; GFX90A-NEXT: ds_add_rtn_f32 v2, v2, v1 +; GFX90A-NEXT: .LBB29_4: +; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_mov_b64 s[6:7], exec -; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_readfirstlane_b32 s8, v1 ; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v1, s6, 0 ; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v1, s7, v1 +; GFX90A-NEXT: s_waitcnt lgkmcnt(0) +; GFX90A-NEXT: v_readfirstlane_b32 s8, v2 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 ; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc -; GFX90A-NEXT: s_cbranch_execz .LBB29_4 -; GFX90A-NEXT: ; %bb.3: +; GFX90A-NEXT: s_cbranch_execz .LBB29_6 +; GFX90A-NEXT: ; %bb.5: ; GFX90A-NEXT: s_bcnt1_i32_b64 s6, s[6:7] ; GFX90A-NEXT: v_cvt_f32_ubyte0_e32 v1, s6 ; GFX90A-NEXT: s_lshl_b32 s3, s3, 4 ; GFX90A-NEXT: v_mul_f32_e32 v1, 0x42280000, v1 ; GFX90A-NEXT: v_mov_b32_e32 v2, s3 ; GFX90A-NEXT: 
ds_add_f32 v2, v1 -; GFX90A-NEXT: .LBB29_4: +; GFX90A-NEXT: .LBB29_6: ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] -; GFX90A-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 -; GFX90A-NEXT: v_mul_f32_e32 v0, 0x42280000, v0 ; GFX90A-NEXT: s_mov_b64 s[4:5], exec ; GFX90A-NEXT: v_add_f32_e32 v2, s8, v0 ; GFX90A-NEXT: v_bfrev_b32_e32 v1, 1 ; GFX90A-NEXT: ; implicit-def: $vgpr0 -; GFX90A-NEXT: .LBB29_5: ; %ComputeLoop +; GFX90A-NEXT: .LBB29_7: ; %ComputeLoop1 ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_ff1_i32_b64 s3, s[4:5] ; GFX90A-NEXT: s_lshl_b64 s[6:7], 1, s3 @@ -8121,19 +8234,19 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs ; GFX90A-NEXT: v_writelane_b32 v0, s8, m0 ; GFX90A-NEXT: s_cmp_lg_u64 s[4:5], 0 ; GFX90A-NEXT: v_add_f32_e32 v1, s9, v1 -; GFX90A-NEXT: s_cbranch_scc1 .LBB29_5 -; GFX90A-NEXT: ; %bb.6: ; %ComputeEnd +; GFX90A-NEXT: s_cbranch_scc1 .LBB29_7 +; GFX90A-NEXT: ; %bb.8: ; %ComputeEnd2 ; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0 ; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2 ; GFX90A-NEXT: ; implicit-def: $vgpr2 ; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc ; GFX90A-NEXT: s_xor_b64 s[4:5], exec, s[4:5] -; GFX90A-NEXT: s_cbranch_execz .LBB29_8 -; GFX90A-NEXT: ; %bb.7: +; GFX90A-NEXT: s_cbranch_execz .LBB29_10 +; GFX90A-NEXT: ; %bb.9: ; GFX90A-NEXT: v_mov_b32_e32 v2, s2 ; GFX90A-NEXT: ds_add_rtn_f32 v2, v2, v1 -; GFX90A-NEXT: .LBB29_8: +; GFX90A-NEXT: .LBB29_10: ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) @@ -8145,49 +8258,61 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs ; ; GFX908-LABEL: local_ds_fadd_one_as: ; GFX908: ; %bb.0: +; GFX908-NEXT: s_mov_b64 s[2:3], exec +; GFX908-NEXT: v_bfrev_b32_e32 v1, 1 +; GFX908-NEXT: v_mov_b32_e32 v2, 0x42280000 +; GFX908-NEXT: ; implicit-def: $vgpr0 +; GFX908-NEXT: .LBB29_1: ; %ComputeLoop +; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX908-NEXT: s_ff1_i32_b64 s6, s[2:3] +; GFX908-NEXT: s_lshl_b64 s[4:5], 1, s6 +; GFX908-NEXT: v_readfirstlane_b32 s7, v1 +; GFX908-NEXT: v_readlane_b32 s8, v2, s6 +; GFX908-NEXT: s_mov_b32 m0, s6 +; GFX908-NEXT: s_andn2_b64 s[2:3], s[2:3], s[4:5] +; GFX908-NEXT: v_writelane_b32 v0, s7, m0 +; GFX908-NEXT: s_cmp_lg_u64 s[2:3], 0 +; GFX908-NEXT: v_add_f32_e32 v1, s8, v1 +; GFX908-NEXT: s_cbranch_scc1 .LBB29_1 +; GFX908-NEXT: ; %bb.2: ; %ComputeEnd ; GFX908-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x8 -; GFX908-NEXT: s_mov_b64 s[4:5], exec -; GFX908-NEXT: v_mbcnt_lo_u32_b32 v0, s4, 0 -; GFX908-NEXT: v_mbcnt_hi_u32_b32 v0, s5, v0 -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; GFX908-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0 +; GFX908-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2 +; GFX908-NEXT: ; implicit-def: $vgpr2 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) ; GFX908-NEXT: s_add_i32 s3, s3, 4 -; GFX908-NEXT: ; implicit-def: $vgpr1 -; GFX908-NEXT: s_and_saveexec_b64 s[6:7], vcc -; GFX908-NEXT: s_cbranch_execz .LBB29_2 -; GFX908-NEXT: ; %bb.1: -; GFX908-NEXT: s_bcnt1_i32_b64 s4, s[4:5] -; GFX908-NEXT: s_lshl_b32 s8, s3, 3 -; GFX908-NEXT: v_cvt_f32_ubyte0_e32 v1, s4 -; GFX908-NEXT: v_mul_f32_e32 v1, 0x42280000, v1 -; GFX908-NEXT: v_mov_b32_e32 v2, s8 -; GFX908-NEXT: ds_add_rtn_f32 v1, v2, v1 -; GFX908-NEXT: .LBB29_2: -; GFX908-NEXT: s_or_b64 exec, exec, s[6:7] +; GFX908-NEXT: s_and_saveexec_b64 s[4:5], vcc +; GFX908-NEXT: s_xor_b64 s[4:5], exec, s[4:5] +; GFX908-NEXT: 
s_cbranch_execz .LBB29_4
+; GFX908-NEXT: ; %bb.3:
+; GFX908-NEXT: s_lshl_b32 s6, s3, 3
+; GFX908-NEXT: v_mov_b32_e32 v2, s6
+; GFX908-NEXT: ds_add_rtn_f32 v2, v2, v1
+; GFX908-NEXT: .LBB29_4:
+; GFX908-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_mov_b64 s[6:7], exec
-; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_readfirstlane_b32 s8, v1
; GFX908-NEXT: v_mbcnt_lo_u32_b32 v1, s6, 0
; GFX908-NEXT: v_mbcnt_hi_u32_b32 v1, s7, v1
+; GFX908-NEXT: s_waitcnt lgkmcnt(0)
+; GFX908-NEXT: v_readfirstlane_b32 s8, v2
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX908-NEXT: s_cbranch_execz .LBB29_4
-; GFX908-NEXT: ; %bb.3:
+; GFX908-NEXT: s_cbranch_execz .LBB29_6
+; GFX908-NEXT: ; %bb.5:
; GFX908-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
; GFX908-NEXT: v_cvt_f32_ubyte0_e32 v1, s6
; GFX908-NEXT: s_lshl_b32 s3, s3, 4
; GFX908-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
; GFX908-NEXT: v_mov_b32_e32 v2, s3
; GFX908-NEXT: ds_add_f32 v2, v1
-; GFX908-NEXT: .LBB29_4:
+; GFX908-NEXT: .LBB29_6:
; GFX908-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX908-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
-; GFX908-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
; GFX908-NEXT: s_mov_b64 s[4:5], exec
; GFX908-NEXT: v_add_f32_e32 v2, s8, v0
; GFX908-NEXT: v_bfrev_b32_e32 v1, 1
; GFX908-NEXT: ; implicit-def: $vgpr0
-; GFX908-NEXT: .LBB29_5: ; %ComputeLoop
+; GFX908-NEXT: .LBB29_7: ; %ComputeLoop1
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_ff1_i32_b64 s3, s[4:5]
; GFX908-NEXT: s_lshl_b64 s[6:7], 1, s3
@@ -8198,19 +8323,19 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX908-NEXT: v_writelane_b32 v0, s8, m0
; GFX908-NEXT: s_cmp_lg_u64 s[4:5], 0
; GFX908-NEXT: v_add_f32_e32 v1, s9, v1
-; GFX908-NEXT: s_cbranch_scc1 .LBB29_5
-; GFX908-NEXT: ; %bb.6: ; %ComputeEnd
+; GFX908-NEXT: s_cbranch_scc1 .LBB29_7
+; GFX908-NEXT: ; %bb.8: ; %ComputeEnd2
; GFX908-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
; GFX908-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
; GFX908-NEXT: ; implicit-def: $vgpr2
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX908-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX908-NEXT: s_cbranch_execz .LBB29_8
-; GFX908-NEXT: ; %bb.7:
+; GFX908-NEXT: s_cbranch_execz .LBB29_10
+; GFX908-NEXT: ; %bb.9:
; GFX908-NEXT: v_mov_b32_e32 v2, s2
; GFX908-NEXT: ds_add_rtn_f32 v2, v2, v1
-; GFX908-NEXT: .LBB29_8:
+; GFX908-NEXT: .LBB29_10:
; GFX908-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
@@ -8222,50 +8347,62 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
;
; GFX8-LABEL: local_ds_fadd_one_as:
; GFX8: ; %bb.0:
+; GFX8-NEXT: s_mov_b64 s[2:3], exec
+; GFX8-NEXT: v_bfrev_b32_e32 v1, 1
+; GFX8-NEXT: v_mov_b32_e32 v2, 0x42280000
+; GFX8-NEXT: ; implicit-def: $vgpr0
+; GFX8-NEXT: .LBB29_1: ; %ComputeLoop
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_ff1_i32_b64 s6, s[2:3]
+; GFX8-NEXT: s_lshl_b64 s[4:5], 1, s6
+; GFX8-NEXT: v_readfirstlane_b32 s7, v1
+; GFX8-NEXT: v_readlane_b32 s8, v2, s6
+; GFX8-NEXT: s_mov_b32 m0, s6
+; GFX8-NEXT: s_andn2_b64 s[2:3], s[2:3], s[4:5]
+; GFX8-NEXT: v_writelane_b32 v0, s7, m0
+; GFX8-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX8-NEXT: v_add_f32_e32 v1, s8, v1
+; GFX8-NEXT: s_cbranch_scc1 .LBB29_1
+; GFX8-NEXT: ; %bb.2: ; %ComputeEnd
; GFX8-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x8
-; GFX8-NEXT: s_mov_b64 s[4:5], exec
-; GFX8-NEXT: v_mbcnt_lo_u32_b32 v0, s4, 0
-; GFX8-NEXT: v_mbcnt_hi_u32_b32 v0, s5, v0
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX8-NEXT: v_mbcnt_lo_u32_b32 v3, exec_lo, 0
+; GFX8-NEXT: v_mbcnt_hi_u32_b32 v3, exec_hi, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
+; GFX8-NEXT: ; implicit-def: $vgpr2
+; GFX8-NEXT: s_mov_b32 m0, -1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_add_i32 s3, s3, 4
-; GFX8-NEXT: ; implicit-def: $vgpr1
-; GFX8-NEXT: s_mov_b32 m0, -1
-; GFX8-NEXT: s_and_saveexec_b64 s[6:7], vcc
-; GFX8-NEXT: s_cbranch_execz .LBB29_2
-; GFX8-NEXT: ; %bb.1:
-; GFX8-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
-; GFX8-NEXT: s_lshl_b32 s8, s3, 3
-; GFX8-NEXT: v_cvt_f32_ubyte0_e32 v1, s4
-; GFX8-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
-; GFX8-NEXT: v_mov_b32_e32 v2, s8
-; GFX8-NEXT: ds_add_rtn_f32 v1, v2, v1
-; GFX8-NEXT: .LBB29_2:
-; GFX8-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX8-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GFX8-NEXT: s_cbranch_execz .LBB29_4
+; GFX8-NEXT: ; %bb.3:
+; GFX8-NEXT: s_lshl_b32 s6, s3, 3
+; GFX8-NEXT: v_mov_b32_e32 v2, s6
+; GFX8-NEXT: ds_add_rtn_f32 v2, v2, v1
+; GFX8-NEXT: .LBB29_4:
+; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_mov_b64 s[6:7], exec
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_readfirstlane_b32 s8, v1
; GFX8-NEXT: v_mbcnt_lo_u32_b32 v1, s6, 0
; GFX8-NEXT: v_mbcnt_hi_u32_b32 v1, s7, v1
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: v_readfirstlane_b32 s8, v2
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX8-NEXT: s_cbranch_execz .LBB29_4
-; GFX8-NEXT: ; %bb.3:
+; GFX8-NEXT: s_cbranch_execz .LBB29_6
+; GFX8-NEXT: ; %bb.5:
; GFX8-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
; GFX8-NEXT: v_cvt_f32_ubyte0_e32 v1, s6
; GFX8-NEXT: s_lshl_b32 s3, s3, 4
; GFX8-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
; GFX8-NEXT: v_mov_b32_e32 v2, s3
; GFX8-NEXT: ds_add_f32 v2, v1
-; GFX8-NEXT: .LBB29_4:
+; GFX8-NEXT: .LBB29_6:
; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX8-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
-; GFX8-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
; GFX8-NEXT: s_mov_b64 s[4:5], exec
; GFX8-NEXT: v_add_f32_e32 v2, s8, v0
; GFX8-NEXT: v_bfrev_b32_e32 v1, 1
; GFX8-NEXT: ; implicit-def: $vgpr0
-; GFX8-NEXT: .LBB29_5: ; %ComputeLoop
+; GFX8-NEXT: .LBB29_7: ; %ComputeLoop1
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_ff1_i32_b64 s3, s[4:5]
; GFX8-NEXT: s_lshl_b64 s[6:7], 1, s3
@@ -8276,20 +8413,20 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX8-NEXT: v_writelane_b32 v0, s8, m0
; GFX8-NEXT: s_cmp_lg_u64 s[4:5], 0
; GFX8-NEXT: v_add_f32_e32 v1, s9, v1
-; GFX8-NEXT: s_cbranch_scc1 .LBB29_5
-; GFX8-NEXT: ; %bb.6: ; %ComputeEnd
+; GFX8-NEXT: s_cbranch_scc1 .LBB29_7
+; GFX8-NEXT: ; %bb.8: ; %ComputeEnd2
; GFX8-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
; GFX8-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
; GFX8-NEXT: ; implicit-def: $vgpr2
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX8-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX8-NEXT: s_cbranch_execz .LBB29_8
-; GFX8-NEXT: ; %bb.7:
+; GFX8-NEXT: s_cbranch_execz .LBB29_10
+; GFX8-NEXT: ; %bb.9:
; GFX8-NEXT: v_mov_b32_e32 v2, s2
; GFX8-NEXT: s_mov_b32 m0, -1
; GFX8-NEXT: ds_add_rtn_f32 v2, v2, v1
-; GFX8-NEXT: .LBB29_8:
+; GFX8-NEXT: .LBB29_10:
; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
@@ -8303,47 +8440,35 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX7-LABEL: local_ds_fadd_one_as:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x2
-; GFX7-NEXT: s_mov_b64 s[6:7], exec
-; GFX7-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s6, 0
-; GFX7-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s7, v0
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7-NEXT: s_mov_b32 m0, -1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_lshl_b32 s4, s3, 3
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: ds_read_b32 v0, v0 offset:32
; GFX7-NEXT: s_add_i32 s3, s3, 4
-; GFX7-NEXT: ; implicit-def: $vgpr1
-; GFX7-NEXT: s_mov_b32 m0, -1
-; GFX7-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX7-NEXT: s_cbranch_execz .LBB29_4
-; GFX7-NEXT: ; %bb.1:
-; GFX7-NEXT: s_lshl_b32 s8, s3, 3
-; GFX7-NEXT: v_mov_b32_e32 v2, s8
-; GFX7-NEXT: ds_read_b32 v1, v2
-; GFX7-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
-; GFX7-NEXT: v_cvt_f32_ubyte0_e32 v3, s6
-; GFX7-NEXT: v_mul_f32_e32 v3, 0x42280000, v3
-; GFX7-NEXT: s_mov_b64 s[6:7], 0
-; GFX7-NEXT: .LBB29_2: ; %atomicrmw.start
+; GFX7-NEXT: s_lshl_b32 s6, s3, 3
+; GFX7-NEXT: s_mov_b64 s[4:5], 0
+; GFX7-NEXT: v_mov_b32_e32 v1, s6
+; GFX7-NEXT: .LBB29_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v4, v1
-; GFX7-NEXT: v_add_f32_e32 v1, v4, v3
-; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v2, v4, v1
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: v_add_f32_e32 v0, 0x42280000, v2
+; GFX7-NEXT: ds_cmpst_rtn_b32 v0, v1, v2, v0
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v4
-; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GFX7-NEXT: s_cbranch_execnz .LBB29_2
-; GFX7-NEXT: ; %bb.3: ; %Flow18
-; GFX7-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX7-NEXT: .LBB29_4: ; %Flow19
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
+; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7-NEXT: s_cbranch_execnz .LBB29_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_mov_b64 s[6:7], exec
-; GFX7-NEXT: v_readfirstlane_b32 s8, v1
; GFX7-NEXT: v_mbcnt_lo_u32_b32_e64 v1, s6, 0
; GFX7-NEXT: v_mbcnt_hi_u32_b32_e32 v1, s7, v1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
; GFX7-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX7-NEXT: s_cbranch_execz .LBB29_7
-; GFX7-NEXT: ; %bb.5:
+; GFX7-NEXT: s_cbranch_execz .LBB29_5
+; GFX7-NEXT: ; %bb.3:
; GFX7-NEXT: s_lshl_b32 s3, s3, 4
; GFX7-NEXT: v_mov_b32_e32 v1, s3
; GFX7-NEXT: ds_read_b32 v3, v1
@@ -8351,7 +8476,7 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX7-NEXT: v_cvt_f32_ubyte0_e32 v2, s3
; GFX7-NEXT: v_mul_f32_e32 v2, 0x42280000, v2
; GFX7-NEXT: s_mov_b64 s[6:7], 0
-; GFX7-NEXT: .LBB29_6: ; %atomicrmw.start2
+; GFX7-NEXT: .LBB29_4: ; %atomicrmw.start2
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: v_add_f32_e32 v4, v3, v2
@@ -8361,16 +8486,13 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
; GFX7-NEXT: v_mov_b32_e32 v3, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GFX7-NEXT: s_cbranch_execnz .LBB29_6
-; GFX7-NEXT: .LBB29_7: ; %Flow17
+; GFX7-NEXT: s_cbranch_execnz .LBB29_4
+; GFX7-NEXT: .LBB29_5: ; %Flow17
; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: v_mov_b32_e32 v2, s2
; GFX7-NEXT: ds_read_b32 v1, v2
-; GFX7-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
-; GFX7-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
-; GFX7-NEXT: v_add_f32_e32 v0, s8, v0
; GFX7-NEXT: s_mov_b64 s[2:3], 0
-; GFX7-NEXT: .LBB29_8: ; %atomicrmw.start8
+; GFX7-NEXT: .LBB29_6: ; %atomicrmw.start8
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v3, v1
@@ -8380,8 +8502,8 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v3
; GFX7-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX7-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; GFX7-NEXT: s_cbranch_execnz .LBB29_8
-; GFX7-NEXT: ; %bb.9: ; %atomicrmw.end7
+; GFX7-NEXT: s_cbranch_execnz .LBB29_6
+; GFX7-NEXT: ; %bb.7: ; %atomicrmw.end7
; GFX7-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX7-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
; GFX7-NEXT: s_mov_b32 s3, 0xf000
@@ -8393,47 +8515,36 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX6-LABEL: local_ds_fadd_one_as:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x2
-; GFX6-NEXT: s_mov_b64 s[6:7], exec
-; GFX6-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s6, 0
-; GFX6-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s7, v0
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX6-NEXT: s_mov_b32 m0, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
+; GFX6-NEXT: s_lshl_b32 s4, s3, 3
+; GFX6-NEXT: s_add_i32 s4, s4, 32
+; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: ds_read_b32 v0, v0
; GFX6-NEXT: s_add_i32 s3, s3, 4
-; GFX6-NEXT: ; implicit-def: $vgpr1
-; GFX6-NEXT: s_mov_b32 m0, -1
-; GFX6-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX6-NEXT: s_cbranch_execz .LBB29_4
-; GFX6-NEXT: ; %bb.1:
-; GFX6-NEXT: s_lshl_b32 s8, s3, 3
-; GFX6-NEXT: v_mov_b32_e32 v2, s8
-; GFX6-NEXT: ds_read_b32 v1, v2
-; GFX6-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
-; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v3, s6
-; GFX6-NEXT: v_mul_f32_e32 v3, 0x42280000, v3
-; GFX6-NEXT: s_mov_b64 s[6:7], 0
-; GFX6-NEXT: .LBB29_2: ; %atomicrmw.start
+; GFX6-NEXT: s_lshl_b32 s6, s3, 3
+; GFX6-NEXT: s_mov_b64 s[4:5], 0
+; GFX6-NEXT: v_mov_b32_e32 v1, s6
+; GFX6-NEXT: .LBB29_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v4, v1
-; GFX6-NEXT: v_add_f32_e32 v1, v4, v3
-; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v2, v4, v1
+; GFX6-NEXT: v_mov_b32_e32 v2, v0
+; GFX6-NEXT: v_add_f32_e32 v0, 0x42280000, v2
+; GFX6-NEXT: ds_cmpst_rtn_b32 v0, v1, v2, v0
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v4
-; GFX6-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX6-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GFX6-NEXT: s_cbranch_execnz .LBB29_2
-; GFX6-NEXT: ; %bb.3: ; %Flow16
-; GFX6-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX6-NEXT: .LBB29_4: ; %Flow17
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
+; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX6-NEXT: s_cbranch_execnz .LBB29_1
+; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX6-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_mov_b64 s[6:7], exec
-; GFX6-NEXT: v_readfirstlane_b32 s8, v1
; GFX6-NEXT: v_mbcnt_lo_u32_b32_e64 v1, s6, 0
; GFX6-NEXT: v_mbcnt_hi_u32_b32_e32 v1, s7, v1
; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
; GFX6-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX6-NEXT: s_cbranch_execz .LBB29_7
-; GFX6-NEXT: ; %bb.5:
+; GFX6-NEXT: s_cbranch_execz .LBB29_5
+; GFX6-NEXT: ; %bb.3:
; GFX6-NEXT: s_lshl_b32 s3, s3, 4
; GFX6-NEXT: v_mov_b32_e32 v1, s3
; GFX6-NEXT: ds_read_b32 v3, v1
@@ -8441,7 +8552,7 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v2, s3
; GFX6-NEXT: v_mul_f32_e32 v2, 0x42280000, v2
; GFX6-NEXT: s_mov_b64 s[6:7], 0
-; GFX6-NEXT: .LBB29_6: ; %atomicrmw.start2
+; GFX6-NEXT: .LBB29_4: ; %atomicrmw.start2
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: v_add_f32_e32 v4, v3, v2
@@ -8451,16 +8562,13 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX6-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
; GFX6-NEXT: v_mov_b32_e32 v3, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GFX6-NEXT: s_cbranch_execnz .LBB29_6
-; GFX6-NEXT: .LBB29_7: ; %Flow15
+; GFX6-NEXT: s_cbranch_execnz .LBB29_4
+; GFX6-NEXT: .LBB29_5: ; %Flow15
; GFX6-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX6-NEXT: v_mov_b32_e32 v2, s2
; GFX6-NEXT: ds_read_b32 v1, v2
-; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
-; GFX6-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
-; GFX6-NEXT: v_add_f32_e32 v0, s8, v0
; GFX6-NEXT: s_mov_b64 s[2:3], 0
-; GFX6-NEXT: .LBB29_8: ; %atomicrmw.start8
+; GFX6-NEXT: .LBB29_6: ; %atomicrmw.start8
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: v_mov_b32_e32 v3, v1
@@ -8470,8 +8578,8 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v3
; GFX6-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX6-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; GFX6-NEXT: s_cbranch_execnz .LBB29_8
-; GFX6-NEXT: ; %bb.9: ; %atomicrmw.end7
+; GFX6-NEXT: s_cbranch_execnz .LBB29_6
+; GFX6-NEXT: ; %bb.7: ; %atomicrmw.end7
; GFX6-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX6-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
; GFX6-NEXT: s_mov_b32 s3, 0xf000