diff --git a/llvm/include/llvm/Support/TargetOpcodes.def b/llvm/include/llvm/Support/TargetOpcodes.def index 3824b1c66951e..a3c38a56561c1 100644 --- a/llvm/include/llvm/Support/TargetOpcodes.def +++ b/llvm/include/llvm/Support/TargetOpcodes.def @@ -170,15 +170,10 @@ HANDLE_TARGET_OPCODE(LOCAL_ESCAPE) /// comparisons into existing memory operations. HANDLE_TARGET_OPCODE(FAULTING_OP) -/// Wraps a machine instruction to add patchability constraints. An -/// instruction wrapped in PATCHABLE_OP has to either have a minimum +/// Precedes a machine instruction to add patchability constraints. An +/// instruction after PATCHABLE_OP has to either have a minimum /// size or be preceded with a nop of that size. The first operand is -/// an immediate denoting the minimum size of the instruction, the -/// second operand is an immediate denoting the opcode of the original -/// instruction. The rest of the operands are the operands of the -/// original instruction. -/// PATCHABLE_OP can be used as second operand to only insert a nop of -/// required size. +/// an immediate denoting the minimum size of the following instruction. HANDLE_TARGET_OPCODE(PATCHABLE_OP) /// This is a marker instruction which gets translated into a nop sled, useful diff --git a/llvm/lib/CodeGen/PatchableFunction.cpp b/llvm/lib/CodeGen/PatchableFunction.cpp index 9449f143366f0..75c2dfeca58d5 100644 --- a/llvm/lib/CodeGen/PatchableFunction.cpp +++ b/llvm/lib/CodeGen/PatchableFunction.cpp @@ -38,58 +38,28 @@ struct PatchableFunction : public MachineFunctionPass { } bool PatchableFunction::runOnMachineFunction(MachineFunction &MF) { + MachineBasicBlock &FirstMBB = *MF.begin(); + if (MF.getFunction().hasFnAttribute("patchable-function-entry")) { - MachineBasicBlock &FirstMBB = *MF.begin(); const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo(); // The initial .loc covers PATCHABLE_FUNCTION_ENTER. 
BuildMI(FirstMBB, FirstMBB.begin(), DebugLoc(), TII->get(TargetOpcode::PATCHABLE_FUNCTION_ENTER)); return true; - } - - if (!MF.getFunction().hasFnAttribute("patchable-function")) - return false; - + } else if (MF.getFunction().hasFnAttribute("patchable-function")) { #ifndef NDEBUG - Attribute PatchAttr = MF.getFunction().getFnAttribute("patchable-function"); - StringRef PatchType = PatchAttr.getValueAsString(); - assert(PatchType == "prologue-short-redirect" && "Only possibility today!"); + Attribute PatchAttr = MF.getFunction().getFnAttribute("patchable-function"); + StringRef PatchType = PatchAttr.getValueAsString(); + assert(PatchType == "prologue-short-redirect" && "Only possibility today!"); #endif - - auto &FirstMBB = *MF.begin(); - auto *TII = MF.getSubtarget().getInstrInfo(); - - MachineBasicBlock::iterator FirstActualI = llvm::find_if( - FirstMBB, [](const MachineInstr &MI) { return !MI.isMetaInstruction(); }); - - if (FirstActualI == FirstMBB.end()) { - // As of Microsoft documentation on /hotpatch feature, we must ensure that - // "the first instruction of each function is at least two bytes, and no - // jump within the function goes to the first instruction" - - // When the first MBB is empty, insert a patchable no-op. This ensures the - // first instruction is patchable in two special cases: - // - the function is empty (e.g. unreachable) - // - the function jumps back to the first instruction, which is in a - // successor MBB. 
- BuildMI(&FirstMBB, DebugLoc(), TII->get(TargetOpcode::PATCHABLE_OP)) - .addImm(2) - .addImm(TargetOpcode::PATCHABLE_OP); + auto *TII = MF.getSubtarget().getInstrInfo(); + BuildMI(FirstMBB, FirstMBB.begin(), DebugLoc(), + TII->get(TargetOpcode::PATCHABLE_OP)) + .addImm(2); MF.ensureAlignment(Align(16)); return true; } - - auto MIB = BuildMI(FirstMBB, FirstActualI, FirstActualI->getDebugLoc(), - TII->get(TargetOpcode::PATCHABLE_OP)) - .addImm(2) - .addImm(FirstActualI->getOpcode()); - - for (auto &MO : FirstActualI->operands()) - MIB.add(MO); - - FirstActualI->eraseFromParent(); - MF.ensureAlignment(Align(16)); - return true; + return false; } char PatchableFunction::ID = 0; diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp index e1a67f61e7664..e4d1d13cc3159 100644 --- a/llvm/lib/Target/X86/X86MCInstLower.cpp +++ b/llvm/lib/Target/X86/X86MCInstLower.cpp @@ -948,24 +948,22 @@ void X86AsmPrinter::LowerASAN_CHECK_MEMACCESS(const MachineInstr &MI) { void X86AsmPrinter::LowerPATCHABLE_OP(const MachineInstr &MI, X86MCInstLower &MCIL) { - // PATCHABLE_OP minsize, opcode, operands + // PATCHABLE_OP minsize NoAutoPaddingScope NoPadScope(*OutStreamer); - unsigned MinSize = MI.getOperand(0).getImm(); - unsigned Opcode = MI.getOperand(1).getImm(); - // Opcode PATCHABLE_OP is a special case: there is no instruction to wrap, - // simply emit a nop of size MinSize. 
- bool EmptyInst = (Opcode == TargetOpcode::PATCHABLE_OP); - - MCInst MCI; - MCI.setOpcode(Opcode); - for (auto &MO : drop_begin(MI.operands(), 2)) - if (auto MaybeOperand = MCIL.LowerMachineOperand(&MI, MO)) - MCI.addOperand(*MaybeOperand); + auto NextMI = std::find_if(std::next(MI.getIterator()), + MI.getParent()->end().getInstrIterator(), + [](auto &II) { return !II.isMetaInstruction(); }); SmallString<256> Code; - if (!EmptyInst) { + unsigned MinSize = MI.getOperand(0).getImm(); + + if (NextMI != MI.getParent()->end()) { + // Lower the next MachineInstr to find its byte size. + MCInst MCI; + MCIL.Lower(&*NextMI, MCI); + + SmallVector<MCFixup, 4> Fixups; CodeEmitter->encodeInstruction(MCI, Code, Fixups, getSubtargetInfo()); } @@ -981,21 +979,12 @@ void X86AsmPrinter::LowerPATCHABLE_OP(const MachineInstr &MI, OutStreamer->emitInstruction( MCInstBuilder(X86::MOV32rr_REV).addReg(X86::EDI).addReg(X86::EDI), *Subtarget); - } else if (MinSize == 2 && Opcode == X86::PUSH64r) { - // This is an optimization that lets us get away without emitting a nop in - // many cases. - // - // NB! In some cases the encoding for PUSH64r (e.g. PUSH64r %r9) takes two - // bytes too, so the check on MinSize is important. 
- MCI.setOpcode(X86::PUSH64rmr); } else { unsigned NopSize = emitNop(*OutStreamer, MinSize, Subtarget); assert(NopSize == MinSize && "Could not implement MinSize!"); (void)NopSize; } } - if (!EmptyInst) - OutStreamer->emitInstruction(MCI, getSubtargetInfo()); } // Lower a stackmap of the form: diff --git a/llvm/test/CodeGen/X86/patchable-prologue-tailcall.ll b/llvm/test/CodeGen/X86/patchable-prologue-tailcall.ll new file mode 100644 index 0000000000000..5e55524fecc9a --- /dev/null +++ b/llvm/test/CodeGen/X86/patchable-prologue-tailcall.ll @@ -0,0 +1,34 @@ +; RUN: llc -verify-machineinstrs -mtriple=x86_64-windows-msvc < %s | FileCheck %s --check-prefix=CHECK + +; CHECK: f1: +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: jmp f0 # TAILCALL + +; CHECK: f2: +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: jmp malloc # TAILCALL + +define ptr @f1(i64 %count) "patchable-function"="prologue-short-redirect" { +entry: + %call = tail call ptr @f0(i64 %count) + ret ptr %call +} + +declare ptr @f0(i64) + +define noalias ptr @f2(i64 %count) "patchable-function"="prologue-short-redirect" { +entry: + %call = tail call ptr @malloc(i64 %count) + ret ptr %call +} + +declare noalias ptr @malloc(i64) #0 + +attributes #0 = { allockind("alloc,uninitialized") allocsize(0) memory(inaccessiblemem: readwrite) "alloc-family"="malloc" } + +!llvm.module.flags = !{!0, !1, !2, !3} + +!0 = !{i32 1, !"wchar_size", i32 2} +!1 = !{i32 8, !"PIC Level", i32 2} +!2 = !{i32 7, !"uwtable", i32 2} +!3 = !{i32 1, !"MaxTLSAlign", i32 65536} diff --git a/llvm/test/CodeGen/X86/patchable-prologue.ll b/llvm/test/CodeGen/X86/patchable-prologue.ll index f1a39777a40bc..71a392845fdea 100644 --- a/llvm/test/CodeGen/X86/patchable-prologue.ll +++ b/llvm/test/CodeGen/X86/patchable-prologue.ll @@ -32,7 +32,8 @@ define void @f0() "patchable-function"="prologue-short-redirect" { define void @f1() "patchable-function"="prologue-short-redirect" "frame-pointer"="all" { ; CHECK-LABEL: _f1 -; CHECK-NEXT: ff f5 pushq %rbp +; CHECK-NEXT: 66 90 nop 
+; CHECK-NEXT: 55 pushq %rbp ; CHECK-ALIGN: .p2align 4, 0x90 ; CHECK-ALIGN: _f1: @@ -47,6 +48,7 @@ define void @f1() "patchable-function"="prologue-short-redirect" "frame-pointer" ; 64: f1: ; 64-NEXT: .seh_proc f1 ; 64-NEXT: # %bb.0: +; 64-NEXT: xchgw %ax, %ax ; 64-NEXT: pushq %rbp ret void