diff --git a/llvm/test/CodeGen/AMDGPU/vgpr-remat-v_pk_mov_b32.mir b/llvm/test/CodeGen/AMDGPU/vgpr-remat-v_pk_mov_b32.mir
new file mode 100644
index 0000000000000..c8d6bf386078f
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/vgpr-remat-v_pk_mov_b32.mir
@@ -0,0 +1,49 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -run-pass=register-coalescer -o - %s | FileCheck %s
+
+---
+name: test_remat_v_pk_mov_b32
+tracksRegLiveness: true
+body: |
+  ; CHECK-LABEL: name: test_remat_v_pk_mov_b32
+  ; CHECK: bb.0:
+  ; CHECK-NEXT:   successors: %bb.2(0x40000000), %bb.1(0x40000000)
+  ; CHECK-NEXT:   liveins: $sgpr0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[V_PK_MOV_B32_:%[0-9]+]]:vreg_64_align2 = V_PK_MOV_B32 8, 0, 8, 0, 0, 0, 0, 0, 0, implicit $exec
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:vreg_64_align2 = COPY [[V_PK_MOV_B32_]]
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vreg_64_align2 = COPY [[V_PK_MOV_B32_]]
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sreg_64 = COPY $sgpr0
+  ; CHECK-NEXT:   $exec = S_MOV_B64_term [[COPY2]]
+  ; CHECK-NEXT:   S_CBRANCH_EXECZ %bb.2, implicit $exec
+  ; CHECK-NEXT:   S_BRANCH %bb.1
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:vreg_64_align2 = V_PK_ADD_F32 8, [[COPY]], 8, [[COPY]], 11, 0, 0, 0, 0, implicit $mode, implicit $exec
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vreg_64_align2 = V_PK_ADD_F32 8, [[COPY1]], 8, [[COPY1]], 11, 0, 0, 0, 0, implicit $mode, implicit $exec
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.2:
+  ; CHECK-NEXT:   S_NOP 0, implicit [[COPY]]
+  ; CHECK-NEXT:   S_NOP 0, implicit [[COPY1]]
+  ; CHECK-NEXT:   S_ENDPGM 0, implicit [[V_PK_MOV_B32_]]
+  bb.0:
+    liveins: $sgpr0
+    %0:vreg_64_align2 = V_PK_MOV_B32 8, 0, 8, 0, 0, 0, 0, 0, 0, implicit $exec
+    %1:vreg_64_align2 = COPY %0:vreg_64_align2
+    %2:vreg_64_align2 = COPY %0:vreg_64_align2
+    %3:sreg_64 = COPY $sgpr0
+    $exec = S_MOV_B64_term %3:sreg_64
+    S_CBRANCH_EXECZ %bb.2, implicit $exec
+    S_BRANCH %bb.1
+
+  bb.1:
+    %1:vreg_64_align2 = V_PK_ADD_F32 8, %1, 8, %1, 11, 0, 0, 0, 0, implicit $mode, implicit $exec
+    %2:vreg_64_align2 = V_PK_ADD_F32 8, %2, 8, %2, 11, 0, 0, 0, 0, implicit $mode, implicit $exec
+
+  bb.2:
+    S_NOP 0, implicit %1
+    S_NOP 0, implicit %2
+    S_ENDPGM 0, implicit %0
+...
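For context (my reading; the patch itself carries no commentary): the test pins down register-coalescer rematerialization behavior for V_PK_MOV_B32. The coalescer cannot simply join %1 and %2 with %0, since both are redefined by the V_PK_ADD_F32s in bb.1 while %0 stays live into bb.2, so its remaining option is to rematerialize the def at each copy site. If that kicked in, the two COPYs in bb.0 would instead come out roughly as

    %1:vreg_64_align2 = V_PK_MOV_B32 8, 0, 8, 0, 0, 0, 0, 0, 0, implicit $exec
    %2:vreg_64_align2 = V_PK_MOV_B32 8, 0, 8, 0, 0, 0, 0, 0, 0, implicit $exec

(a hypothetical output, shown for illustration, not something the test checks for). The autogenerated assertions instead expect both COPYs to survive coalescing and %0 to stay live through to S_ENDPGM, i.e. no rematerialization. The interesting ingredient appears to be that V_PK_MOV_B32 implicitly reads $exec, which bb.0 redefines via S_MOV_B64_term between the definition and the uses in bb.1 and bb.2. If llc's behavior here changes intentionally, the checks can be refreshed by rerunning utils/update_mir_test_checks.py on this file, per the NOTE line at the top.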