# RUN: not llc --crash -mtriple=amdgcn-amd-amdpal -mcpu=gfx1030 -run-pass=greedy -filetype=null %s

# This test would crash while trying to split a liverange during register allocation.

---
name: test_kernel
tracksRegLiveness: true
registers:
  - { id: 0, class: vgpr_32 }
  - { id: 1, class: vreg_64 }
  - { id: 2, class: sgpr_32 }
  - { id: 3, class: sreg_32 }
  - { id: 4, class: sreg_32 }
  - { id: 5, class: sreg_32 }
  - { id: 6, class: sgpr_256 }
  - { id: 7, class: sgpr_256 }
  - { id: 8, class: sgpr_256 }
  - { id: 9, class: sgpr_256 }
  - { id: 10, class: sgpr_256 }
  - { id: 11, class: sreg_32_xm0_xexec }
  - { id: 12, class: sreg_32_xm0_xexec }
  - { id: 13, class: sgpr_64 }
  - { id: 14, class: sreg_32_xm0_xexec }
  - { id: 15, class: sreg_32 }
  - { id: 16, class: sreg_32 }
  - { id: 17, class: sreg_32 }
  - { id: 18, class: sreg_32 }
  - { id: 19, class: sreg_32 }
  - { id: 20, class: sreg_32 }
  - { id: 21, class: sreg_32 }
  - { id: 22, class: sreg_32 }
  - { id: 23, class: sreg_32 }
  - { id: 24, class: sreg_32 }
  - { id: 25, class: sreg_32 }
  - { id: 26, class: sreg_32 }
  - { id: 27, class: sreg_32 }
  - { id: 28, class: sreg_32 }
  - { id: 29, class: sreg_32 }
  - { id: 30, class: sreg_32 }
  - { id: 31, class: sreg_32 }
  - { id: 32, class: sreg_32 }
  - { id: 33, class: sreg_32 }
  - { id: 34, class: sreg_32 }
  - { id: 35, class: sreg_32 }
  - { id: 36, class: sreg_32 }
  - { id: 37, class: sreg_32 }
  - { id: 38, class: sreg_32 }
  - { id: 39, class: sreg_32 }
  - { id: 40, class: sreg_32 }
  - { id: 41, class: sreg_32 }
  - { id: 42, class: sreg_32 }
  - { id: 43, class: sreg_32 }
  - { id: 44, class: sreg_32 }
  - { id: 45, class: sreg_32 }
  - { id: 46, class: sreg_32 }
  - { id: 47, class: sreg_32 }
  - { id: 48, class: sreg_32 }
  - { id: 49, class: sreg_32 }
  - { id: 50, class: sreg_32 }
  - { id: 51, class: sreg_32 }
  - { id: 52, class: sreg_32 }
  - { id: 53, class: sreg_32 }
  - { id: 54, class: sreg_32 }
  - { id: 55, class: sreg_32 }
  - { id: 56, class: sreg_32 }
  - { id: 57, class: sreg_32 }
  - { id: 58, class: sreg_32 }
  - { id: 59, class: sreg_32 }
  - { id: 60, class: sreg_32 }
  - { id: 61, class: sreg_32 }
  - { id: 62, class: sreg_32 }
  - { id: 63, class: sreg_32 }
  - { id: 64, class: sreg_32 }
  - { id: 65, class: sreg_32 }
  - { id: 66, class: sreg_32 }
  - { id: 67, class: sreg_32 }
  - { id: 68, class: sreg_32 }
  - { id: 69, class: sreg_32 }
  - { id: 70, class: sreg_32 }
  - { id: 71, class: sreg_32 }
  - { id: 72, class: sreg_32 }
  - { id: 73, class: sreg_32 }
  - { id: 74, class: sreg_32 }
  - { id: 75, class: sreg_32 }
  - { id: 76, class: sreg_32 }
  - { id: 77, class: sreg_32 }
  - { id: 78, class: sreg_32 }
  - { id: 79, class: sreg_32 }
  - { id: 80, class: sreg_32 }
  - { id: 81, class: sreg_32 }
  - { id: 82, class: sreg_32 }
  - { id: 83, class: sreg_32 }
  - { id: 84, class: sreg_32 }
  - { id: 85, class: sreg_32 }
  - { id: 86, class: sreg_32 }
  - { id: 87, class: sreg_32 }
  - { id: 88, class: sreg_32 }
  - { id: 89, class: sreg_32 }
  - { id: 90, class: sreg_32 }
  - { id: 91, class: sreg_32 }
  - { id: 92, class: sreg_32 }
  - { id: 93, class: sgpr_64 }
  - { id: 94, class: sreg_32_xm0_xexec }
  - { id: 95, class: sgpr_32 }
  - { id: 96, class: sreg_32_xm0_xexec }
  - { id: 97, class: sreg_64 }
  - { id: 98, class: sreg_32_xm0_xexec }
  - { id: 99, class: sreg_32_xm0_xexec }
  - { id: 100, class: sreg_64 }
  - { id: 101, class: sgpr_128 }
  - { id: 102, class: sreg_64_xexec }
  - { id: 103, class: sgpr_32 }
  - { id: 104, class: sgpr_64 }
  - { id: 105, class: sgpr_64 }
  - { id: 106, class: sgpr_64 }
  - { id: 107, class: sreg_32, preferred-register: '$vcc' }
  - { id: 108, class: sreg_32, preferred-register: '$vcc' }
  - { id: 109, class: sgpr_32 }
  - { id: 110, class: sgpr_256 }
  - { id: 111, class: sgpr_512 }
  - { id: 112, class: sgpr_512 }
  - { id: 113, class: sgpr_256 }
  - { id: 114, class: sgpr_256 }
  - { id: 115, class: sgpr_256 }
  - { id: 116, class: sreg_32_xm0_xexec }
machineFunctionInfo:
  maxKernArgAlign: 1
  isEntryFunction: true
  stackPtrOffsetReg: '$sgpr32'
  sgprForEXECCopy: '$sgpr105'
body: |
  bb.0:
    successors: %bb.1, %bb.2
    liveins: $sgpr0, $sgpr1, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13

    %0:vgpr_32 = IMPLICIT_DEF
    undef %1.sub1:vreg_64 = IMPLICIT_DEF
    %109:sgpr_32 = COPY undef $sgpr1
    undef %93.sub1:sgpr_64 = COPY undef $sgpr0
    undef %106.sub0:sgpr_64 = V_READFIRSTLANE_B32 undef %0, implicit $exec
    %106.sub1:sgpr_64 = V_READFIRSTLANE_B32 undef %0, implicit $exec
    undef %105.sub0:sgpr_64 = V_READFIRSTLANE_B32 undef %0, implicit $exec
    %105.sub1:sgpr_64 = IMPLICIT_DEF
    undef %104.sub0:sgpr_64 = V_READFIRSTLANE_B32 undef %0, implicit $exec
    %104.sub1:sgpr_64 = V_READFIRSTLANE_B32 undef %0, implicit $exec
    %4:sreg_32 = S_MOV_B32 0
    %5:sreg_32 = S_MOV_B32 0
    S_CBRANCH_SCC1 %bb.2, implicit undef $scc
    S_BRANCH %bb.1

  bb.1:
    %5:sreg_32 = IMPLICIT_DEF

  bb.2:
    successors: %bb.3, %bb.4

    %101:sgpr_128 = S_LOAD_DWORDX4_IMM undef %104, 132, 0 :: ("amdgpu-noclobber" load (s128), align 8, addrspace 1)
    %10:sgpr_256 = S_LOAD_DWORDX8_IMM undef %104, 188, 0 :: ("amdgpu-noclobber" load (s256), align 8, addrspace 1)
    %100:sreg_64 = S_MOV_B64 0
    S_CBRANCH_SCC1 %bb.4, implicit undef $scc
    S_BRANCH %bb.3

  bb.3:
    %4:sreg_32 = S_MOV_B32 -1

  bb.4:
    successors: %bb.5, %bb.6

    %102:sreg_64_xexec = S_LOAD_DWORDX2_IMM undef %104, 120, 0 :: ("amdgpu-noclobber" load (s64), align 16, addrspace 1)
    %8:sgpr_256 = S_LOAD_DWORDX8_IMM undef %104, 352, 0 :: ("amdgpu-noclobber" load (s256), align 16, addrspace 1)
    %98:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM undef %97:sreg_64, 0, 0
    %7:sgpr_256 = S_LOAD_DWORDX8_IMM undef %104, 652, 0 :: ("amdgpu-noclobber" load (s256), align 8, addrspace 1)
    %96:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM %100, 0, 0 :: ("amdgpu-noclobber" load (s32), align 8, addrspace 1)
    %6:sgpr_256 = S_LOAD_DWORDX8_IMM %104, 688, 0 :: ("amdgpu-noclobber" load (s256), align 16, addrspace 1)
    %2:sgpr_32 = S_MOV_B32 0
    %3:sreg_32 = S_MOV_B32 0
    S_CBRANCH_SCC1 %bb.6, implicit undef $scc
    S_BRANCH %bb.5

  bb.5:
    %3:sreg_32 = S_MOV_B32 -1

  bb.6:
    successors: %bb.7, %bb.10

    %103:sgpr_32 = S_LOAD_DWORD_IMM undef %104, 0, 0 :: ("amdgpu-noclobber" load (s32), align 16, addrspace 1)
    %115:sgpr_256 = S_LOAD_DWORDX8_IMM undef %104, 152, 0 :: ("amdgpu-noclobber" load (s256), align 4, addrspace 1)
    %114:sgpr_256 = S_LOAD_DWORDX8_IMM undef %104, 220, 0 :: ("amdgpu-noclobber" load (s256), align 4, addrspace 1)
    %113:sgpr_256 = S_LOAD_DWORDX8_IMM undef %104, 384, 0 :: ("amdgpu-noclobber" load (s256), align 4, addrspace 1)
    %112:sgpr_512 = S_LOAD_DWORDX16_IMM undef %104, 440, 0 :: ("amdgpu-noclobber" load (s512), align 8, addrspace 1)
    %111:sgpr_512 = S_LOAD_DWORDX16_IMM undef %104, 584, 0 :: ("amdgpu-noclobber" load (s512), align 16, addrspace 1)
    %110:sgpr_256 = S_LOAD_DWORDX8_IMM %106, 156, 0 :: ("amdgpu-noclobber" load (s256), align 8, addrspace 1)
    %95:sgpr_32 = S_LOAD_DWORD_IMM %105, 0, 0 :: ("amdgpu-noclobber" load (s32), align 8, addrspace 1)
    %94:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM %106, 0, 0 :: ("amdgpu-noclobber" load (s32), addrspace 1)
    %99:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM %100, 0, 0 :: ("amdgpu-noclobber" load (s32), addrspace 1)
    %107:sreg_32 = IMPLICIT_DEF
    %108:sreg_32 = IMPLICIT_DEF
    %93.sub0:sgpr_64 = S_MOV_B32 1
    S_CBRANCH_SCC1 %bb.10, implicit undef $scc
    S_BRANCH %bb.7

  bb.7:
    successors: %bb.8, %bb.9

    undef %13.sub0:sgpr_64 = V_READFIRSTLANE_B32 undef %1.sub0, implicit $exec
    %13.sub1:sgpr_64 = V_READFIRSTLANE_B32 undef %1.sub1, implicit $exec
    %92:sreg_32 = IMPLICIT_DEF
    %2:sgpr_32 = S_MOV_B32 0
    $vcc = COPY %92
    S_CBRANCH_VCCNZ %bb.9, implicit $vcc
    S_BRANCH %bb.8

  bb.8:
    %2:sgpr_32 = S_MOV_B32 -1

  bb.9:

  bb.10:
    successors: %bb.11, %bb.12

    %91:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %103, 0, implicit $mode, implicit $exec
    %90:sreg_32 = S_AND_B32 undef %91, %5, implicit-def dead $scc
    S_CMP_EQ_U32 %109, 0, implicit-def $scc
    %12:sreg_32_xm0_xexec = IMPLICIT_DEF
    S_CMP_EQ_U32 %102.sub1, 0, implicit-def $scc
    %11:sreg_32_xm0_xexec = IMPLICIT_DEF
    %77:sreg_32 = S_OR_B32 %115.sub7, %99, implicit-def dead $scc
    %82:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %115.sub0, 0, implicit $mode, implicit $exec
    %79:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %115.sub2, 0, implicit $mode, implicit $exec
    %78:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %115.sub4, 0, implicit $mode, implicit $exec
    %76:sreg_32 = S_OR_B32 %10.sub0, undef %77, implicit-def dead $scc
    %75:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %10.sub1, 0, implicit $mode, implicit $exec
    %74:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %10.sub2, 0, implicit $mode, implicit $exec
    %73:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %10.sub3, 0, implicit $mode, implicit $exec
    %72:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %10.sub4, 0, implicit $mode, implicit $exec
    %70:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %10.sub5, 0, implicit $mode, implicit $exec
    %69:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %10.sub6, 0, implicit $mode, implicit $exec
    %87:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %101.sub0, 0, implicit $mode, implicit $exec
    %86:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %101.sub1, 0, implicit $mode, implicit $exec
    %83:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %101.sub2, 0, implicit $mode, implicit $exec
    %89:sreg_32 = S_AND_B32 undef %11, %108, implicit-def dead $scc
    %88:sreg_32 = IMPLICIT_DEF
    %85:sreg_32 = IMPLICIT_DEF
    %84:sreg_32 = IMPLICIT_DEF
    %81:sreg_32 = IMPLICIT_DEF
    %80:sreg_32 = IMPLICIT_DEF
    %71:sreg_32 = S_AND_B32 undef %80, undef %80, implicit-def dead $scc
    %67:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %114.sub0, 0, implicit $mode, implicit $exec
    %68:sreg_32 = S_AND_B32 undef %70, undef %69, implicit-def dead $scc
    %66:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %114.sub2, 0, implicit $mode, implicit $exec
    %65:sreg_32 = S_OR_B32 %114.sub5, %114.sub7, implicit-def dead $scc
    %63:sreg_32 = S_OR_B32 %8.sub0, %8.sub1, implicit-def dead $scc
    %62:sreg_32 = S_OR_B32 %8.sub2, undef %63, implicit-def dead $scc
    %64:sreg_32 = S_AND_B32 undef %63, %4, implicit-def dead $scc
    %61:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %8.sub3, 0, implicit $mode, implicit $exec
    %60:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %8.sub4, 0, implicit $mode, implicit $exec
    %59:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %8.sub5, 0, implicit $mode, implicit $exec
    %58:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %8.sub6, 0, implicit $mode, implicit $exec
    %57:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %98, 0, implicit $mode, implicit $exec
    %56:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %113.sub0, 0, implicit $mode, implicit $exec
    %53:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %113.sub2, 0, implicit $mode, implicit $exec
    %55:sreg_32 = IMPLICIT_DEF
    %52:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %113.sub4, 0, implicit $mode, implicit $exec
    %54:sreg_32 = S_AND_B32 undef %55, undef %56, implicit-def dead $scc
    S_CMP_EQ_U32 %113.sub7, 0, implicit-def $scc
    %51:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %112.sub0, 0, implicit $mode, implicit $exec
    %49:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %112.sub2, 0, implicit $mode, implicit $exec
    %48:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %112.sub4, 0, implicit $mode, implicit $exec
    %47:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %112.sub6, 0, implicit $mode, implicit $exec
    %50:sreg_32 = S_AND_B32 undef %51, undef %51, implicit-def dead $scc
    %46:sreg_32 = S_OR_B32 %112.sub10, %112.sub9, implicit-def dead $scc
    %45:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %112.sub13, 0, implicit $mode, implicit $exec
    %44:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %112.sub14, 0, implicit $mode, implicit $exec
    S_CMP_EQ_U32 %111.sub1, 0, implicit-def $scc
    %116:sreg_32_xm0_xexec = IMPLICIT_DEF
    %42:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %111.sub5, 0, implicit $mode, implicit $exec
    %41:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %111.sub6, 0, implicit $mode, implicit $exec
    %43:sreg_32 = IMPLICIT_DEF
    %38:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %111.sub9, 0, implicit $mode, implicit $exec
    %37:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %111.sub10, 0, implicit $mode, implicit $exec
    %40:sreg_32 = IMPLICIT_DEF
    %39:sreg_32 = S_AND_B32 undef %40, undef %43, implicit-def dead $scc
    %36:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %111.sub12, 0, implicit $mode, implicit $exec
    %34:sreg_32 = S_OR_B32 %7.sub0, %111.sub15, implicit-def dead $scc
    %35:sreg_32 = IMPLICIT_DEF
    %32:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %7.sub1, 0, implicit $mode, implicit $exec
    %31:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %7.sub2, 0, implicit $mode, implicit $exec
    %33:sreg_32 = IMPLICIT_DEF
    %28:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %7.sub3, 0, implicit $mode, implicit $exec
    %27:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %7.sub4, 0, implicit $mode, implicit $exec
    %30:sreg_32 = IMPLICIT_DEF
    %26:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %7.sub5, 0, implicit $mode, implicit $exec
    %25:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %7.sub6, 0, implicit $mode, implicit $exec
    %29:sreg_32 = S_AND_B32 undef %30, undef %33, implicit-def dead $scc
    %23:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %6.sub0, 0, implicit $mode, implicit $exec
    %22:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %6.sub1, 0, implicit $mode, implicit $exec
    %24:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %96, 0, implicit $mode, implicit $exec
    %21:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %6.sub2, 0, implicit $mode, implicit $exec
    %20:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %6.sub3, 0, implicit $mode, implicit $exec
    %19:sreg_32 = S_OR_B32 %6.sub4, %6.sub5, implicit-def dead $scc
    S_CMP_EQ_U32 %96, 0, implicit-def $scc
    %18:sreg_32 = S_AND_B32 undef %19, %3, implicit-def dead $scc
    %14:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM %93, 0, 0 :: ("amdgpu-noclobber" load (s32), align 8, addrspace 1)
    S_CMP_EQ_U32 %110.sub7, 0, implicit-def $scc
    %16:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %95, 0, implicit $mode, implicit $exec
    %17:sreg_32 = S_AND_B32 %107, undef %14, implicit-def dead $scc
    %15:sreg_32 = S_AND_B32 undef %17, %2, implicit-def dead $scc
    $vcc = COPY undef %15
    S_CBRANCH_VCCNZ %bb.12, implicit $vcc
    S_BRANCH %bb.11

  bb.11:

  bb.12:
    GLOBAL_STORE_DWORD_SADDR undef %0, undef %0, %104, 0, 0, implicit $exec :: (store (s32), addrspace 1)
    GLOBAL_STORE_DWORD_SADDR undef %0, undef %0, %106, 0, 0, implicit $exec :: (store (s32), addrspace 1)
    S_ENDPGM 0
...