[MCP] Optimize copies when src is used during backward propagation #111130

Merged: 4 commits (Oct 23, 2024)
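
This patch teaches the MachineCopyPropagation (MCP) backward pass to also rewrite intermediate uses of a copy's source register. Previously, a read of the source between its definition and the COPY caused the copy to be invalidated; now those uses are tracked (CopyInfo::SrcUsers) and renamed to the copy's destination together with the defining instruction, so the COPY can still be removed. A minimal before/after sketch, taken from the test1 case added in this patch (AArch64 MIR):

Before:
    renamable $w1 = MOVi32imm 5
    renamable $w3 = ADDWrr renamable $w1, killed renamable $w2
    renamable $w0 = COPY killed renamable $w1
    RET_ReallyLR implicit killed $w0

After (the def and the intermediate ADDWrr use are both renamed to $w0, and the COPY is deleted):
    renamable $w0 = MOVi32imm 5
    renamable $w3 = ADDWrr renamable $w0, killed renamable $w2
    RET_ReallyLR implicit killed $w0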
79 changes: 77 additions & 2 deletions llvm/lib/CodeGen/MachineCopyPropagation.cpp
@@ -110,6 +110,7 @@ class CopyTracker {
struct CopyInfo {
MachineInstr *MI = nullptr;
MachineInstr *LastSeenUseInCopy = nullptr;
SmallPtrSet<MachineInstr *, 4> SrcUsers;
SmallVector<MCRegister, 4> DefRegs;
bool Avail = false;
};
@@ -224,6 +225,43 @@ class CopyTracker {
}
}

/// Track copy's src users, and return false if that can't be done.
/// We can only track if we have a COPY instruction whose source is
/// the same as \p Reg.
bool trackSrcUsers(MCRegister Reg, MachineInstr &MI,
const TargetRegisterInfo &TRI, const TargetInstrInfo &TII,
bool UseCopyInstr) {
MCRegUnit RU = *TRI.regunits(Reg).begin();
MachineInstr *AvailCopy = findCopyDefViaUnit(RU, TRI);
if (!AvailCopy)
return false;

std::optional<DestSourcePair> CopyOperands =
isCopyInstr(*AvailCopy, TII, UseCopyInstr);
Register Src = CopyOperands->Source->getReg();

// Bail out if the source of the copy is not the same as Reg.
if (Src != Reg)
return false;

auto I = Copies.find(RU);
if (I == Copies.end())
return false;

I->second.SrcUsers.insert(&MI);
return true;
}

/// Return the users for a given register.
SmallPtrSet<MachineInstr *, 4> getSrcUsers(MCRegister Reg,
const TargetRegisterInfo &TRI) {
MCRegUnit RU = *TRI.regunits(Reg).begin();
auto I = Copies.find(RU);
if (I == Copies.end())
return {};
return I->second.SrcUsers;
}

/// Add this copy's registers into the tracker's copy maps.
void trackCopy(MachineInstr *MI, const TargetRegisterInfo &TRI,
const TargetInstrInfo &TII, bool UseCopyInstr) {
@@ -236,7 +274,7 @@ class CopyTracker {

// Remember Def is defined by the copy.
for (MCRegUnit Unit : TRI.regunits(Def))
Copies[Unit] = {MI, nullptr, {}, true};
Copies[Unit] = {MI, nullptr, {}, {}, true};

// Remember source that's copied to Def. Once it's clobbered, then
// it's no longer available for copy propagation.
@@ -427,6 +465,8 @@ class MachineCopyPropagation : public MachineFunctionPass {
bool hasImplicitOverlap(const MachineInstr &MI, const MachineOperand &Use);
bool hasOverlappingMultipleDef(const MachineInstr &MI,
const MachineOperand &MODef, Register Def);
bool canUpdateSrcUsers(const MachineInstr &Copy,
const MachineOperand &CopySrc);

/// Candidates for deletion.
SmallSetVector<MachineInstr *, 8> MaybeDeadCopies;
@@ -667,6 +707,27 @@ bool MachineCopyPropagation::hasOverlappingMultipleDef(
return false;
}

/// Return true if it is safe to update all users of the \p CopySrc register
/// in the given \p Copy instruction.
bool MachineCopyPropagation::canUpdateSrcUsers(const MachineInstr &Copy,
const MachineOperand &CopySrc) {
assert(CopySrc.isReg() && "Expected a register operand");
for (auto *SrcUser : Tracker.getSrcUsers(CopySrc.getReg(), *TRI)) {
if (hasImplicitOverlap(*SrcUser, CopySrc))
return false;

for (MachineOperand &MO : SrcUser->uses()) {
if (!MO.isReg() || !MO.isUse() || MO.getReg() != CopySrc.getReg())
continue;
if (MO.isTied() || !MO.isRenamable() ||
!isBackwardPropagatableRegClassCopy(Copy, *SrcUser,
MO.getOperandNo()))
return false;
}
}
return true;
}

/// Look for available copies whose destination register is used by \p MI and
/// replace the use in \p MI with the copy's source register.
void MachineCopyPropagation::forwardUses(MachineInstr &MI) {
@@ -1033,13 +1094,25 @@ void MachineCopyPropagation::propagateDefs(MachineInstr &MI) {
if (hasOverlappingMultipleDef(MI, MODef, Def))
continue;

if (!canUpdateSrcUsers(*Copy, *CopyOperands->Source))
continue;

LLVM_DEBUG(dbgs() << "MCP: Replacing " << printReg(MODef.getReg(), TRI)
<< "\n with " << printReg(Def, TRI) << "\n in "
<< MI << " from " << *Copy);

MODef.setReg(Def);
MODef.setIsRenamable(CopyOperands->Destination->isRenamable());

for (auto *SrcUser : Tracker.getSrcUsers(Src, *TRI)) {
for (MachineOperand &MO : SrcUser->uses()) {
if (!MO.isReg() || !MO.isUse() || MO.getReg() != Src)
continue;
MO.setReg(Def);
MO.setIsRenamable(CopyOperands->Destination->isRenamable());
}
}

LLVM_DEBUG(dbgs() << "MCP: After replacement: " << MI << "\n");
MaybeDeadCopies.insert(Copy);
Changed = true;
@@ -1105,7 +1178,9 @@ void MachineCopyPropagation::BackwardCopyPropagateBlock(
CopyDbgUsers[Copy].insert(&MI);
}
}
} else {
} else if (!Tracker.trackSrcUsers(MO.getReg().asMCReg(), MI, *TRI, *TII,
UseCopyInstr)) {
// If we can't track the source users, invalidate the register.
Tracker.invalidateRegister(MO.getReg().asMCReg(), *TRI, *TII,
UseCopyInstr);
}
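
The rewrite is only performed when every tracked use of the source can be updated safely; canUpdateSrcUsers rejects tied, non-renamable, implicitly overlapping, or otherwise non-propagatable uses. The new MIR test below exercises both sides. For instance, test2 must keep the COPY because the use of $w1 in the ADDWrr is not marked renamable (sketch copied from the test input):

    renamable $w1 = MOVi32imm 5
    renamable $w3 = ADDWrr $w1, killed renamable $w2
    renamable $w0 = COPY killed renamable $w1
    RET_ReallyLR implicit killed $w0

test3 likewise bails out because $w1 also appears as an implicit use on the ADDWrr.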
58 changes: 58 additions & 0 deletions llvm/test/CodeGen/AArch64/machine-cp-backward-uses.mir
@@ -0,0 +1,58 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
# RUN: llc -mtriple=aarch64-none-linux-gnu -run-pass machine-cp -verify-machineinstrs -o - %s | FileCheck %s

# Normal case
---
name: test1
body: |
bb.0:
liveins: $w2
; CHECK-LABEL: name: test1
; CHECK: liveins: $w2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: renamable $w0 = MOVi32imm 5
; CHECK-NEXT: renamable $w3 = ADDWrr renamable $w0, killed renamable $w2
; CHECK-NEXT: RET_ReallyLR implicit killed $w0
renamable $w1 = MOVi32imm 5
renamable $w3 = ADDWrr renamable $w1, killed renamable $w2
renamable $w0 = COPY killed renamable $w1
RET_ReallyLR implicit killed $w0
...

# Not renamable use
---
name: test2
body: |
bb.0:
liveins: $w2
; CHECK-LABEL: name: test2
; CHECK: liveins: $w2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: renamable $w1 = MOVi32imm 5
; CHECK-NEXT: renamable $w3 = ADDWrr $w1, killed renamable $w2
; CHECK-NEXT: renamable $w0 = COPY killed renamable $w1
; CHECK-NEXT: RET_ReallyLR implicit killed $w0
renamable $w1 = MOVi32imm 5
renamable $w3 = ADDWrr $w1, killed renamable $w2
renamable $w0 = COPY killed renamable $w1
RET_ReallyLR implicit killed $w0
...

# Implicit use
---
name: test3
body: |
bb.0:
liveins: $w2
; CHECK-LABEL: name: test3
; CHECK: liveins: $w2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: renamable $w1 = MOVi32imm 5
; CHECK-NEXT: renamable $w3 = ADDWrr renamable $w1, killed renamable $w2, implicit $w1
; CHECK-NEXT: renamable $w0 = COPY killed renamable $w1
; CHECK-NEXT: RET_ReallyLR implicit killed $w0
renamable $w1 = MOVi32imm 5
renamable $w3 = ADDWrr renamable $w1, killed renamable $w2, implicit $w1
renamable $w0 = COPY killed renamable $w1
RET_ReallyLR implicit killed $w0
...
5 changes: 2 additions & 3 deletions llvm/test/CodeGen/ARM/umulo-128-legalisation-lowering.ll
@@ -7,12 +7,11 @@ define { i128, i8 } @muloti_test(i128 %l, i128 %r) unnamed_addr #0 {
; ARMV6: @ %bb.0: @ %start
; ARMV6-NEXT: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; ARMV6-NEXT: sub sp, sp, #28
; ARMV6-NEXT: ldr r7, [sp, #72]
; ARMV6-NEXT: ldr lr, [sp, #72]
; ARMV6-NEXT: mov r6, r0
; ARMV6-NEXT: str r0, [sp, #8] @ 4-byte Spill
; ARMV6-NEXT: ldr r4, [sp, #84]
; ARMV6-NEXT: umull r1, r0, r2, r7
; ARMV6-NEXT: mov lr, r7
; ARMV6-NEXT: umull r1, r0, r2, lr
; ARMV6-NEXT: umull r5, r10, r4, r2
; ARMV6-NEXT: str r1, [r6]
; ARMV6-NEXT: ldr r6, [sp, #80]
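
The ARM expectation change above shows the end-to-end effect: the value at [sp, #72] is now loaded straight into lr, the intermediate umull operand is renamed with it, and the separate mov lr, r7 copy disappears (excerpt from the updated checks):

    ldr lr, [sp, #72]
    ...
    umull r1, r0, r2, lr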
68 changes: 30 additions & 38 deletions llvm/test/CodeGen/Mips/llvm-ir/sdiv.ll
@@ -388,9 +388,8 @@ define signext i64 @sdiv_i64(i64 signext %a, i64 signext %b) {
; MMR3-NEXT: .cfi_def_cfa_offset 24
; MMR3-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill
; MMR3-NEXT: .cfi_offset 31, -4
; MMR3-NEXT: addu $2, $2, $25
; MMR3-NEXT: lw $25, %call16(__divdi3)($2)
; MMR3-NEXT: move $gp, $2
; MMR3-NEXT: addu $gp, $2, $25
; MMR3-NEXT: lw $25, %call16(__divdi3)($gp)
; MMR3-NEXT: jalr $25
; MMR3-NEXT: nop
; MMR3-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload
@@ -405,9 +404,8 @@ define signext i64 @sdiv_i64(i64 signext %a, i64 signext %b) {
; MMR6-NEXT: .cfi_def_cfa_offset 24
; MMR6-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill
; MMR6-NEXT: .cfi_offset 31, -4
; MMR6-NEXT: addu $2, $2, $25
; MMR6-NEXT: lw $25, %call16(__divdi3)($2)
; MMR6-NEXT: move $gp, $2
; MMR6-NEXT: addu $gp, $2, $25
; MMR6-NEXT: lw $25, %call16(__divdi3)($gp)
; MMR6-NEXT: jalr $25
; MMR6-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload
; MMR6-NEXT: addiu $sp, $sp, 24
@@ -549,65 +547,59 @@ define signext i128 @sdiv_i128(i128 signext %a, i128 signext %b) {
; MMR3: # %bb.0: # %entry
; MMR3-NEXT: lui $2, %hi(_gp_disp)
; MMR3-NEXT: addiu $2, $2, %lo(_gp_disp)
; MMR3-NEXT: addiusp -48
; MMR3-NEXT: .cfi_def_cfa_offset 48
; MMR3-NEXT: sw $ra, 44($sp) # 4-byte Folded Spill
; MMR3-NEXT: swp $16, 36($sp)
; MMR3-NEXT: addiusp -40
; MMR3-NEXT: .cfi_def_cfa_offset 40
; MMR3-NEXT: sw $ra, 36($sp) # 4-byte Folded Spill
; MMR3-NEXT: sw $17, 32($sp) # 4-byte Folded Spill
; MMR3-NEXT: .cfi_offset 31, -4
; MMR3-NEXT: .cfi_offset 17, -8
; MMR3-NEXT: .cfi_offset 16, -12
; MMR3-NEXT: addu $16, $2, $25
; MMR3-NEXT: addu $gp, $2, $25
; MMR3-NEXT: move $1, $7
; MMR3-NEXT: lw $7, 68($sp)
; MMR3-NEXT: lw $17, 72($sp)
; MMR3-NEXT: lw $3, 76($sp)
; MMR3-NEXT: lw $7, 60($sp)
; MMR3-NEXT: lw $17, 64($sp)
; MMR3-NEXT: lw $3, 68($sp)
; MMR3-NEXT: move $2, $sp
; MMR3-NEXT: sw16 $3, 28($2)
; MMR3-NEXT: sw16 $17, 24($2)
; MMR3-NEXT: sw16 $7, 20($2)
; MMR3-NEXT: lw $3, 64($sp)
; MMR3-NEXT: lw $3, 56($sp)
; MMR3-NEXT: sw16 $3, 16($2)
; MMR3-NEXT: lw $25, %call16(__divti3)($16)
; MMR3-NEXT: lw $25, %call16(__divti3)($gp)
; MMR3-NEXT: move $7, $1
; MMR3-NEXT: move $gp, $16
; MMR3-NEXT: jalr $25
; MMR3-NEXT: nop
; MMR3-NEXT: lwp $16, 36($sp)
; MMR3-NEXT: lw $ra, 44($sp) # 4-byte Folded Reload
; MMR3-NEXT: addiusp 48
; MMR3-NEXT: lw $17, 32($sp) # 4-byte Folded Reload
; MMR3-NEXT: lw $ra, 36($sp) # 4-byte Folded Reload
; MMR3-NEXT: addiusp 40
; MMR3-NEXT: jrc $ra
;
; MMR6-LABEL: sdiv_i128:
; MMR6: # %bb.0: # %entry
; MMR6-NEXT: lui $2, %hi(_gp_disp)
; MMR6-NEXT: addiu $2, $2, %lo(_gp_disp)
; MMR6-NEXT: addiu $sp, $sp, -48
; MMR6-NEXT: .cfi_def_cfa_offset 48
; MMR6-NEXT: sw $ra, 44($sp) # 4-byte Folded Spill
; MMR6-NEXT: sw $17, 40($sp) # 4-byte Folded Spill
; MMR6-NEXT: sw $16, 36($sp) # 4-byte Folded Spill
; MMR6-NEXT: addiu $sp, $sp, -40
; MMR6-NEXT: .cfi_def_cfa_offset 40
; MMR6-NEXT: sw $ra, 36($sp) # 4-byte Folded Spill
; MMR6-NEXT: sw $17, 32($sp) # 4-byte Folded Spill
; MMR6-NEXT: .cfi_offset 31, -4
; MMR6-NEXT: .cfi_offset 17, -8
; MMR6-NEXT: .cfi_offset 16, -12
; MMR6-NEXT: addu $16, $2, $25
; MMR6-NEXT: addu $gp, $2, $25
; MMR6-NEXT: move $1, $7
; MMR6-NEXT: lw $7, 68($sp)
; MMR6-NEXT: lw $17, 72($sp)
; MMR6-NEXT: lw $3, 76($sp)
; MMR6-NEXT: lw $7, 60($sp)
; MMR6-NEXT: lw $17, 64($sp)
; MMR6-NEXT: lw $3, 68($sp)
; MMR6-NEXT: move $2, $sp
; MMR6-NEXT: sw16 $3, 28($2)
; MMR6-NEXT: sw16 $17, 24($2)
; MMR6-NEXT: sw16 $7, 20($2)
; MMR6-NEXT: lw $3, 64($sp)
; MMR6-NEXT: lw $3, 56($sp)
; MMR6-NEXT: sw16 $3, 16($2)
; MMR6-NEXT: lw $25, %call16(__divti3)($16)
; MMR6-NEXT: lw $25, %call16(__divti3)($gp)
; MMR6-NEXT: move $7, $1
; MMR6-NEXT: move $gp, $16
; MMR6-NEXT: jalr $25
; MMR6-NEXT: lw $16, 36($sp) # 4-byte Folded Reload
; MMR6-NEXT: lw $17, 40($sp) # 4-byte Folded Reload
; MMR6-NEXT: lw $ra, 44($sp) # 4-byte Folded Reload
; MMR6-NEXT: addiu $sp, $sp, 48
; MMR6-NEXT: lw $17, 32($sp) # 4-byte Folded Reload
; MMR6-NEXT: lw $ra, 36($sp) # 4-byte Folded Reload
; MMR6-NEXT: addiu $sp, $sp, 40
; MMR6-NEXT: jrc $ra
entry:
%r = sdiv i128 %a, %b
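
The Mips sdiv updates tell the same story at a larger scale: with the source users of the GP-setup copy rewritten, $gp is computed directly instead of going through a scratch register followed by a move. In sdiv_i128 this also frees $16, so one callee-saved spill/reload pair disappears and the frame shrinks from 48 to 40 bytes (MMR3 excerpt from the diff above):

Before:
    addu $16, $2, $25
    ...
    lw $25, %call16(__divti3)($16)
    ...
    move $gp, $16
    jalr $25

After:
    addu $gp, $2, $25
    ...
    lw $25, %call16(__divti3)($gp)
    ...
    jalr $25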