[AMDGPU] Select flat GVS stores on gfx1250 #149203

Conversation

rampitec (Collaborator)

No description provided.

rampitec (Collaborator, Author)

This stack of pull requests is managed by Graphite. Learn more about stacking.

rampitec requested a review from changpeng July 16, 2025 22:22
rampitec marked this pull request as ready for review July 16, 2025 22:22
llvmbot (Member) commented Jul 16, 2025

@llvm/pr-subscribers-backend-amdgpu

Author: Stanislav Mekhanoshin (rampitec)

Changes

Patch is 57.00 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/149203.diff

2 Files Affected:

  • (modified) llvm/lib/Target/AMDGPU/FLATInstructions.td (+41-22)
  • (added) llvm/test/CodeGen/AMDGPU/flat-saddr-store.ll (+1118)
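
For context while reading the diff: the SADDR (GVS) form of a flat store keeps the uniform 64-bit base in an SGPR pair and carries only a 32-bit offset in a VGPR. A minimal IR sketch of the shape the new patterns select, adapted from flat_store_saddr_i16_zext_vgpr in the added test (the function name here is illustrative):

define amdgpu_ps void @saddr_store_sketch(ptr inreg %sbase, i32 %voffset, i16 %data) {
  ; A zero-extended 32-bit VGPR offset added to a uniform base is the
  ; GlobalSAddr shape matched by FlatStoreSaddrPat below.
  %zext.offset = zext i32 %voffset to i64
  %gep = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
  store i16 %data, ptr %gep
  ret void
}

Per the checks in the new test, on gfx1250 this selects to a single "flat_store_b16 v0, v1, s[2:3]" instead of materializing the full 64-bit address in a VGPR pair.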
diff --git a/llvm/lib/Target/AMDGPU/FLATInstructions.td b/llvm/lib/Target/AMDGPU/FLATInstructions.td
index 3965b5dd8c5c3..74632c71f0f95 100644
--- a/llvm/lib/Target/AMDGPU/FLATInstructions.td
+++ b/llvm/lib/Target/AMDGPU/FLATInstructions.td
@@ -1275,8 +1275,8 @@ class FlatLoadSaddrPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt>
   (inst $saddr, $voffset, $offset, 0)
 >;
 
-class GlobalStoreSaddrPat <FLAT_Pseudo inst, SDPatternOperator node,
-                           ValueType vt> : GCNPat <
+class FlatStoreSaddrPat <FLAT_Pseudo inst, SDPatternOperator node,
+                         ValueType vt> : GCNPat <
   (node vt:$data, (GlobalSAddr (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i32:$offset)),
   (inst $voffset, getVregSrcForVT<vt>.ret:$data, $saddr, $offset)
 >;
@@ -1485,7 +1485,7 @@ multiclass GlobalFLATStorePats<FLAT_Pseudo inst, SDPatternOperator node,
     let AddedComplexity = 10;
   }
 
-  def : GlobalStoreSaddrPat<!cast<FLAT_Pseudo>(!cast<string>(inst)#"_SADDR"), node, vt> {
+  def : FlatStoreSaddrPat<!cast<FLAT_Pseudo>(!cast<string>(inst)#"_SADDR"), node, vt> {
     let AddedComplexity = 11;
   }
 }
@@ -1495,7 +1495,7 @@ multiclass GlobalFLATStorePats_D16_t16<string inst, SDPatternOperator node, Valu
     let AddedComplexity = 10;
   }
 
-  def : GlobalStoreSaddrPat<!cast<FLAT_Pseudo>(inst#"_SADDR_t16"), node, vt> {
+  def : FlatStoreSaddrPat<!cast<FLAT_Pseudo>(inst#"_SADDR_t16"), node, vt> {
     let AddedComplexity = 11;
   }
 }
@@ -1655,6 +1655,24 @@ multiclass FlatLoadPats_D16_t16<FLAT_Pseudo inst, SDPatternOperator node, ValueT
   }
 }
 
+multiclass FlatStorePats<FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> {
+  def : FlatStorePat <inst, node, vt>;
+
+  def : FlatStoreSaddrPat<!cast<FLAT_Pseudo>(!cast<string>(inst)#"_SADDR"), node, vt> {
+    let AddedComplexity = 9;
+    let SubtargetPredicate = HasFlatGVSMode;
+  }
+}
+
+multiclass FlatStorePats_t16<FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> {
+  def : FlatStorePat <!cast<FLAT_Pseudo>(!cast<string>(inst)#"_t16"), node, vt>;
+
+  def : FlatStoreSaddrPat<!cast<FLAT_Pseudo>(!cast<string>(inst)#"_SADDR_t16"), node, vt> {
+    let AddedComplexity = 9;
+    let SubtargetPredicate = HasFlatGVSMode;
+  }
+}
+
 let OtherPredicates = [HasFlatAddressSpace] in {
 
 defm : FlatLoadPats <FLAT_LOAD_UBYTE, atomic_load_aext_8_flat, i32>;
@@ -1682,10 +1700,10 @@ let True16Predicate = p in {
   defm : FlatLoadPats <FLAT_LOAD_UBYTE, atomic_load_zext_8_flat, i16>;
   defm : FlatLoadPats <FLAT_LOAD_USHORT, atomic_load_nonext_16_flat, i16>;
   defm : FlatLoadPats <FLAT_LOAD_SBYTE, atomic_load_sext_8_flat, i16>;
-  def : FlatStorePat <FLAT_STORE_BYTE, truncstorei8_flat, i16>;
-  def : FlatStorePat <FLAT_STORE_SHORT, store_flat, i16>;
-  def : FlatStorePat <FLAT_STORE_BYTE, atomic_store_8_flat, i16>;
-  def : FlatStorePat <FLAT_STORE_SHORT, atomic_store_16_flat, i16>;
+  defm : FlatStorePats <FLAT_STORE_BYTE, truncstorei8_flat, i16>;
+  defm : FlatStorePats <FLAT_STORE_SHORT, store_flat, i16>;
+  defm : FlatStorePats <FLAT_STORE_BYTE, atomic_store_8_flat, i16>;
+  defm : FlatStorePats <FLAT_STORE_SHORT, atomic_store_16_flat, i16>;
 }
 
 let OtherPredicates = [D16PreservesUnusedBits, HasFlatAddressSpace], True16Predicate = UseRealTrue16Insts in {
@@ -1697,8 +1715,8 @@ let OtherPredicates = [D16PreservesUnusedBits, HasFlatAddressSpace], True16Predi
   defm : FlatLoadPats_D16_t16<FLAT_LOAD_UBYTE_D16_t16, atomic_load_zext_8_flat, i16>;
   defm : FlatLoadPats_D16_t16<FLAT_LOAD_SHORT_D16_t16, atomic_load_nonext_16_flat, i16>;
   defm : FlatLoadPats_D16_t16<FLAT_LOAD_SBYTE_D16_t16, atomic_load_sext_8_flat, i16>;
-  def : FlatStorePat <FLAT_STORE_BYTE_t16, truncstorei8_flat, i16>;
-  def : FlatStorePat <FLAT_STORE_SHORT_t16, store_flat, i16>;
+  defm : FlatStorePats_t16 <FLAT_STORE_BYTE, truncstorei8_flat, i16>;
+  defm : FlatStorePats_t16 <FLAT_STORE_SHORT, store_flat, i16>;
   def : FlatStorePat <FLAT_STORE_BYTE_t16, atomic_store_8_flat, i16>;
   def : FlatStorePat <FLAT_STORE_SHORT_t16, atomic_store_16_flat, i16>;
 } // End let OtherPredicates = [D16PreservesUnusedBits, HasFlatAddressSpace], True16Predicate = UseRealTrue16Insts
@@ -1706,30 +1724,31 @@ let OtherPredicates = [D16PreservesUnusedBits, HasFlatAddressSpace], True16Predi
 defm : FlatLoadPats <FLAT_LOAD_DWORD, atomic_load_nonext_32_flat, i32>;
 defm : FlatLoadPats <FLAT_LOAD_DWORDX2, atomic_load_nonext_64_flat, i64>;
 
-def : FlatStorePat <FLAT_STORE_BYTE, truncstorei8_flat, i32>;
-def : FlatStorePat <FLAT_STORE_SHORT, truncstorei16_flat, i32>;
+defm : FlatStorePats <FLAT_STORE_BYTE, truncstorei8_flat, i32>;
+defm : FlatStorePats <FLAT_STORE_SHORT, truncstorei16_flat, i32>;
 
 foreach vt = Reg32Types.types in {
 defm : FlatLoadPats <FLAT_LOAD_DWORD, load_flat, vt>;
-def : FlatStorePat <FLAT_STORE_DWORD, store_flat, vt>;
+defm : FlatStorePats <FLAT_STORE_DWORD, store_flat, vt>;
 }
 
 foreach vt = VReg_64.RegTypes in {
-def : FlatStorePat <FLAT_STORE_DWORDX2, store_flat, vt>;
+defm : FlatStorePats <FLAT_STORE_DWORDX2, store_flat, vt>;
 def : FlatLoadPat <FLAT_LOAD_DWORDX2, load_flat, vt>;
 }
 
-def : FlatStorePat <FLAT_STORE_DWORDX3, store_flat, v3i32>;
+defm : FlatStorePats <FLAT_STORE_DWORDX3, store_flat, v3i32>;
 
 foreach vt = VReg_128.RegTypes in {
 defm : FlatLoadPats <FLAT_LOAD_DWORDX4, load_flat, vt>;
-def : FlatStorePat <FLAT_STORE_DWORDX4, store_flat, vt>;
+defm : FlatStorePats <FLAT_STORE_DWORDX4, store_flat, vt>;
 }
 
-def : FlatStorePat <FLAT_STORE_DWORD, atomic_store_32_flat, i32>;
-def : FlatStorePat <FLAT_STORE_DWORDX2, atomic_store_64_flat, i64>;
-def : FlatStorePat <FLAT_STORE_BYTE, atomic_store_8_flat, i32>;
-def : FlatStorePat <FLAT_STORE_SHORT, atomic_store_16_flat, i32>;
+defm : FlatStorePats <FLAT_STORE_DWORD, atomic_store_32_flat, i32>;
+defm : FlatStorePats <FLAT_STORE_DWORDX2, atomic_store_64_flat, i64>;
+defm : FlatStorePats <FLAT_STORE_BYTE, atomic_store_8_flat, i32>;
+defm : FlatStorePats <FLAT_STORE_SHORT, atomic_store_16_flat, i32>;
+
 
 foreach as = [ "flat", "global" ] in {
 defm : FlatAtomicPat <"FLAT_ATOMIC_ADD", "atomic_load_add_"#as, i32>;
@@ -1780,8 +1799,8 @@ let SubtargetPredicate = isGFX12Plus in {
 }
 
 let OtherPredicates = [HasD16LoadStore] in {
-def : FlatStorePat <FLAT_STORE_SHORT_D16_HI, truncstorei16_hi16_flat, i32>;
-def : FlatStorePat <FLAT_STORE_BYTE_D16_HI, truncstorei8_hi16_flat, i32>;
+defm : FlatStorePats <FLAT_STORE_SHORT_D16_HI, truncstorei16_hi16_flat, i32>;
+defm : FlatStorePats <FLAT_STORE_BYTE_D16_HI, truncstorei8_hi16_flat, i32>;
 }
 
 let OtherPredicates = [D16PreservesUnusedBits] in {
diff --git a/llvm/test/CodeGen/AMDGPU/flat-saddr-store.ll b/llvm/test/CodeGen/AMDGPU/flat-saddr-store.ll
new file mode 100644
index 0000000000000..32888d2acf1cd
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/flat-saddr-store.ll
@@ -0,0 +1,1118 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-SDAG %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-GISEL %s
+
+; Test using saddr addressing mode of flat_*store_* instructions.
+
+define amdgpu_ps void @flat_store_saddr_i8_zext_vgpr(ptr inreg %sbase, ptr %voffset.ptr, i8 %data) {
+; GFX1250-LABEL: flat_store_saddr_i8_zext_vgpr:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    flat_load_b32 v0, v[0:1]
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    flat_store_b8 v0, v2, s[2:3]
+; GFX1250-NEXT:    s_endpgm
+  %voffset = load i32, ptr %voffset.ptr
+  %zext.offset = zext i32 %voffset to i64
+  %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+  store i8 %data, ptr %gep0
+  ret void
+}
+
+; Maximum positive offset on gfx10
+define amdgpu_ps void @flat_store_saddr_i8_zext_vgpr_offset_2047(ptr inreg %sbase, ptr %voffset.ptr, i8 %data) {
+; GFX1250-LABEL: flat_store_saddr_i8_zext_vgpr_offset_2047:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    flat_load_b32 v0, v[0:1]
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    flat_store_b8 v0, v2, s[2:3] offset:2047
+; GFX1250-NEXT:    s_endpgm
+  %voffset = load i32, ptr %voffset.ptr
+  %zext.offset = zext i32 %voffset to i64
+  %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+  %gep1 = getelementptr inbounds i8, ptr %gep0, i64 2047
+  store i8 %data, ptr %gep1
+  ret void
+}
+
+; Maximum negative offset on gfx10
+define amdgpu_ps void @flat_store_saddr_i8_zext_vgpr_offset_neg2048(ptr inreg %sbase, ptr %voffset.ptr, i8 %data) {
+; GFX1250-LABEL: flat_store_saddr_i8_zext_vgpr_offset_neg2048:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    flat_load_b32 v0, v[0:1]
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    flat_store_b8 v0, v2, s[2:3] offset:-2048
+; GFX1250-NEXT:    s_endpgm
+  %voffset = load i32, ptr %voffset.ptr
+  %zext.offset = zext i32 %voffset to i64
+  %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+  %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -2048
+  store i8 %data, ptr %gep1
+  ret void
+}
+
+; --------------------------------------------------------------------------------
+; Uniformity edge cases
+; --------------------------------------------------------------------------------
+
+@ptr.in.lds = internal addrspace(3) global ptr undef
+
+; Base pointer is uniform, but also in VGPRs
+define amdgpu_ps void @flat_store_saddr_uniform_ptr_in_vgprs(i32 %voffset, i8 %data) {
+; GFX1250-SDAG-LABEL: flat_store_saddr_uniform_ptr_in_vgprs:
+; GFX1250-SDAG:       ; %bb.0:
+; GFX1250-SDAG-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1250-SDAG-NEXT:    ds_load_b64 v[2:3], v2
+; GFX1250-SDAG-NEXT:    s_wait_dscnt 0x0
+; GFX1250-SDAG-NEXT:    v_readfirstlane_b32 s0, v2
+; GFX1250-SDAG-NEXT:    v_readfirstlane_b32 s1, v3
+; GFX1250-SDAG-NEXT:    flat_store_b8 v0, v1, s[0:1]
+; GFX1250-SDAG-NEXT:    s_endpgm
+;
+; GFX1250-GISEL-LABEL: flat_store_saddr_uniform_ptr_in_vgprs:
+; GFX1250-GISEL:       ; %bb.0:
+; GFX1250-GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1250-GISEL-NEXT:    ds_load_b64 v[2:3], v2
+; GFX1250-GISEL-NEXT:    s_wait_dscnt 0x0
+; GFX1250-GISEL-NEXT:    v_add_co_u32 v2, vcc_lo, v2, v0
+; GFX1250-GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT:    v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX1250-GISEL-NEXT:    flat_store_b8 v[2:3], v1
+; GFX1250-GISEL-NEXT:    s_endpgm
+  %sbase = load ptr, ptr addrspace(3) @ptr.in.lds
+  %zext.offset = zext i32 %voffset to i64
+  %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+  store i8 %data, ptr %gep0
+  ret void
+}
+
+; Base pointer is uniform, but also in VGPRs, with imm offset
+define amdgpu_ps void @flat_store_saddr_uniform_ptr_in_vgprs_immoffset(i32 %voffset, i8 %data) {
+; GFX1250-SDAG-LABEL: flat_store_saddr_uniform_ptr_in_vgprs_immoffset:
+; GFX1250-SDAG:       ; %bb.0:
+; GFX1250-SDAG-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1250-SDAG-NEXT:    ds_load_b64 v[2:3], v2
+; GFX1250-SDAG-NEXT:    s_wait_dscnt 0x0
+; GFX1250-SDAG-NEXT:    v_readfirstlane_b32 s0, v2
+; GFX1250-SDAG-NEXT:    v_readfirstlane_b32 s1, v3
+; GFX1250-SDAG-NEXT:    flat_store_b8 v0, v1, s[0:1] offset:-120
+; GFX1250-SDAG-NEXT:    s_endpgm
+;
+; GFX1250-GISEL-LABEL: flat_store_saddr_uniform_ptr_in_vgprs_immoffset:
+; GFX1250-GISEL:       ; %bb.0:
+; GFX1250-GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1250-GISEL-NEXT:    ds_load_b64 v[2:3], v2
+; GFX1250-GISEL-NEXT:    s_wait_dscnt 0x0
+; GFX1250-GISEL-NEXT:    v_add_co_u32 v2, vcc_lo, v2, v0
+; GFX1250-GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT:    v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX1250-GISEL-NEXT:    flat_store_b8 v[2:3], v1 offset:-120
+; GFX1250-GISEL-NEXT:    s_endpgm
+  %sbase = load ptr, ptr addrspace(3) @ptr.in.lds
+  %zext.offset = zext i32 %voffset to i64
+  %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+  %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -120
+  store i8 %data, ptr %gep1
+  ret void
+}
+
+; --------------------------------------------------------------------------------
+; Stress various type stores
+; --------------------------------------------------------------------------------
+
+define amdgpu_ps void @flat_store_saddr_i16_zext_vgpr(ptr inreg %sbase, i32 %voffset, i16 %data) {
+; GFX1250-LABEL: flat_store_saddr_i16_zext_vgpr:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    flat_store_b16 v0, v1, s[2:3]
+; GFX1250-NEXT:    s_endpgm
+  %zext.offset = zext i32 %voffset to i64
+  %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+  store i16 %data, ptr %gep0
+  ret void
+}
+
+define amdgpu_ps void @flat_store_saddr_i16_zext_vgpr_offset_neg128(ptr inreg %sbase, i32 %voffset, i16 %data) {
+; GFX1250-LABEL: flat_store_saddr_i16_zext_vgpr_offset_neg128:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    flat_store_b16 v0, v1, s[2:3] offset:-128
+; GFX1250-NEXT:    s_endpgm
+  %zext.offset = zext i32 %voffset to i64
+  %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+  %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+  store i16 %data, ptr %gep1
+  ret void
+}
+
+define amdgpu_ps void @flat_store_saddr_f16_zext_vgpr(ptr inreg %sbase, i32 %voffset, half %data) {
+; GFX1250-LABEL: flat_store_saddr_f16_zext_vgpr:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    flat_store_b16 v0, v1, s[2:3]
+; GFX1250-NEXT:    s_endpgm
+  %zext.offset = zext i32 %voffset to i64
+  %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+  store half %data, ptr %gep0
+  ret void
+}
+
+define amdgpu_ps void @flat_store_saddr_f16_zext_vgpr_offset_neg128(ptr inreg %sbase, i32 %voffset, half %data) {
+; GFX1250-LABEL: flat_store_saddr_f16_zext_vgpr_offset_neg128:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    flat_store_b16 v0, v1, s[2:3] offset:-128
+; GFX1250-NEXT:    s_endpgm
+  %zext.offset = zext i32 %voffset to i64
+  %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+  %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+  store half %data, ptr %gep1
+  ret void
+}
+
+define amdgpu_ps void @flat_store_saddr_i32_zext_vgpr(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_store_saddr_i32_zext_vgpr:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    flat_store_b32 v0, v1, s[2:3]
+; GFX1250-NEXT:    s_endpgm
+  %zext.offset = zext i32 %voffset to i64
+  %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+  store i32 %data, ptr %gep0
+  ret void
+}
+
+define amdgpu_ps void @flat_store_saddr_i32_zext_vgpr_offset_neg128(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_store_saddr_i32_zext_vgpr_offset_neg128:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    flat_store_b32 v0, v1, s[2:3] offset:-128
+; GFX1250-NEXT:    s_endpgm
+  %zext.offset = zext i32 %voffset to i64
+  %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+  %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+  store i32 %data, ptr %gep1
+  ret void
+}
+
+define amdgpu_ps void @flat_store_saddr_f32_zext_vgpr(ptr inreg %sbase, i32 %voffset, float %data) {
+; GFX1250-LABEL: flat_store_saddr_f32_zext_vgpr:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    flat_store_b32 v0, v1, s[2:3]
+; GFX1250-NEXT:    s_endpgm
+  %zext.offset = zext i32 %voffset to i64
+  %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+  store float %data, ptr %gep0
+  ret void
+}
+
+define amdgpu_ps void @flat_store_saddr_f32_zext_vgpr_offset_neg128(ptr inreg %sbase, i32 %voffset, float %data) {
+; GFX1250-LABEL: flat_store_saddr_f32_zext_vgpr_offset_neg128:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    flat_store_b32 v0, v1, s[2:3] offset:-128
+; GFX1250-NEXT:    s_endpgm
+  %zext.offset = zext i32 %voffset to i64
+  %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+  %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+  store float %data, ptr %gep1
+  ret void
+}
+
+define amdgpu_ps void @flat_store_saddr_p3_zext_vgpr(ptr inreg %sbase, i32 %voffset, ptr addrspace(3) %data) {
+; GFX1250-LABEL: flat_store_saddr_p3_zext_vgpr:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    flat_store_b32 v0, v1, s[2:3]
+; GFX1250-NEXT:    s_endpgm
+  %zext.offset = zext i32 %voffset to i64
+  %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+  store ptr addrspace(3) %data, ptr %gep0
+  ret void
+}
+
+define amdgpu_ps void @flat_store_saddr_p3_zext_vgpr_offset_neg128(ptr inreg %sbase, i32 %voffset, ptr addrspace(3) %data) {
+; GFX1250-LABEL: flat_store_saddr_p3_zext_vgpr_offset_neg128:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    flat_store_b32 v0, v1, s[2:3] offset:-128
+; GFX1250-NEXT:    s_endpgm
+  %zext.offset = zext i32 %voffset to i64
+  %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+  %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+  store ptr addrspace(3) %data, ptr %gep1
+  ret void
+}
+
+define amdgpu_ps void @flat_store_saddr_i64_zext_vgpr(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_store_saddr_i64_zext_vgpr:
+; GFX1250-SDAG:       ; %bb.0:
+; GFX1250-SDAG-NEXT:    v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT:    flat_store_b64 v0, v[2:3], s[2:3]
+; GFX1250-SDAG-NEXT:    s_endpgm
+;
+; GFX1250-GISEL-LABEL: flat_store_saddr_i64_zext_vgpr:
+; GFX1250-GISEL:       ; %bb.0:
+; GFX1250-GISEL-NEXT:    v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT:    flat_store_b64 v0, v[4:5], s[2:3]
+; GFX1250-GISEL-NEXT:    s_endpgm
+  %zext.offset = zext i32 %voffset to i64
+  %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+  store i64 %data, ptr %gep0
+  ret void
+}
+
+define amdgpu_ps void @flat_store_saddr_i64_zext_vgpr_offset_neg128(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_store_saddr_i64_zext_vgpr_offset_neg128:
+; GFX1250-SDAG:       ; %bb.0:
+; GFX1250-SDAG-NEXT:    v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT:    flat_store_b64 v0, v[2:3], s[2:3] offset:-128
+; GFX1250-SDAG-NEXT:    s_endpgm
+;
+; GFX1250-GISEL-LABEL: flat_store_saddr_i64_zext_vgpr_offset_neg128:
+; GFX1250-GISEL:       ; %bb.0:
+; GFX1250-GISEL-NEXT:    v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT:    flat_store_b64 v0, v[4:5], s[2:3] offset:-128
+; GFX1250-GISEL-NEXT:    s_endpgm
+  %zext.offset = zext i32 %voffset to i64
+  %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+  %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+  store i64 %data, ptr %gep1
+  ret void
+}
+
+define amdgpu_ps void @flat_store_saddr_f64_zext_vgpr(ptr inreg %sbase, i32 %voffset, double %data) {
+; GFX1250-SDAG-LABEL: flat_store_saddr_f64_zext_vgpr:
+; GFX1250-SDAG:       ; %bb.0:
+; GFX1250-SDAG-NEXT:    v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT:    flat_store_b64 v0, v[2:3], s[2:3]
+; GFX1250-SDAG-NEXT:    s_endpgm
+;
+; GFX1250-GISEL-LABEL: flat_store_saddr_f64_zext_vgpr:
+; GFX1250-GISEL:       ; %bb.0:
+; GFX1250-GISEL-NEXT:    v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT:    flat_store_b64 v0, v[4:5], s[2:3]
+; GFX1250-GISEL-NEXT:    s_endpgm
+  %zext.offset = zext i32 %voffset to i64
+  %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+  store double %data, ptr %gep0
+  ret void
+}
+
+define amdgpu_ps void @flat_store_saddr_f64_zext_vgpr_offset_neg128(ptr inreg %sbase, i32 %voffset, double %data) {
+; GFX1250-SDAG-LABEL: flat_store_saddr_f64_zext_vgpr_offset_neg128:
+; GFX1250-SDAG:       ; %bb.0:
+; GFX1250-SDAG-NEXT:    v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT:    flat_store_b64 v0, v[2:3], s[2:3] offset:-128
+; GFX1250-SDAG-NEXT:    s_endpgm
+;
+; GFX1250-GISEL-LABEL: flat_store_saddr_f64_zext_vgpr_offset_neg128:
+; GFX1250-GISEL:       ; %bb.0:
+; GFX1250-GISEL-NEXT:    v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT:    flat_store_b64 v0, v[4:5], s[2:3] offset:-128
+; GFX1250-GISEL-NEXT:    s_endpgm
+  %zext.offset = zext i32 %voffset to i64
+  %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+  %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+  store double %data, ptr %gep1
+  ret void
+}
+
+define amdgpu_ps void @flat_store_saddr_v2i32_zext_vgpr(ptr inreg %sbase, i32 %voffset, <2 x i32> %data) {
+; GFX1250-SDAG-LABEL: flat_store_saddr_v2i32_zext_vgpr:
+; GFX1250-SDAG:       ; %bb.0:
+; GFX12...
[truncated]
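
For reviewers less familiar with the multiclass plumbing: each new defm above expands to two patterns, the plain VGPR-pair form and a SADDR form gated on the new GVS mode. A hand-written sketch of what one instantiation from the diff is equivalent to (not code that appears in the patch):

// defm : FlatStorePats <FLAT_STORE_BYTE, truncstorei8_flat, i32>;
// expands to:
def : FlatStorePat <FLAT_STORE_BYTE, truncstorei8_flat, i32>;
def : FlatStoreSaddrPat <FLAT_STORE_BYTE_SADDR, truncstorei8_flat, i32> {
  let AddedComplexity = 9;                  // below GlobalFLATStorePats' 10/11
  let SubtargetPredicate = HasFlatGVSMode;  // only on subtargets with GVS mode
}

With AddedComplexity 9, the saddr form is preferred over the plain flat form (default complexity) whenever the address matches GlobalSAddr, while still ranking below the 10/11 used by GlobalFLATStorePats, so global-address-space stores keep selecting the global instructions.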

rampitec merged commit 26b0b27 into main Jul 16, 2025
13 checks passed
rampitec deleted the users/rampitec/07-16-_amdgpu_select_flat_gvs_stores_on_gfx1250 branch July 16, 2025 23:36