diff --git a/llvm/lib/Transforms/Scalar/GVNSink.cpp b/llvm/lib/Transforms/Scalar/GVNSink.cpp
index d4907326eb0a5..ddf01dc612bb5 100644
--- a/llvm/lib/Transforms/Scalar/GVNSink.cpp
+++ b/llvm/lib/Transforms/Scalar/GVNSink.cpp
@@ -226,12 +226,22 @@ class ModelledPHI {
 public:
   ModelledPHI() = default;
 
-  ModelledPHI(const PHINode *PN) {
-    // BasicBlock comes first so we sort by basic block pointer order, then by value pointer order.
-    SmallVector<std::pair<BasicBlock *, Value *>, 4> Ops;
+  ModelledPHI(const PHINode *PN,
+              const DenseMap<const BasicBlock *, unsigned> &BlockOrder) {
+    // BasicBlock comes first so we sort by basic block order. No need to
+    // call `verifyModelledPHI` as the Values and Blocks are populated in a
+    // deterministic order.
+    using OpsType = std::pair<BasicBlock *, Value *>;
+    SmallVector<OpsType, 4> Ops;
     for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I)
       Ops.push_back({PN->getIncomingBlock(I), PN->getIncomingValue(I)});
-    llvm::sort(Ops);
+
+    auto ComesBefore = [BlockOrder](OpsType O1, OpsType O2) {
+      return BlockOrder.lookup(O1.first) < BlockOrder.lookup(O2.first);
+    };
+    // Sort in a deterministic order.
+    llvm::sort(Ops, ComesBefore);
+
     for (auto &P : Ops) {
       Blocks.push_back(P.first);
       Values.push_back(P.second);
@@ -247,16 +257,38 @@ class ModelledPHI {
     return M;
   }
 
+  void
+  verifyModelledPHI(const DenseMap<const BasicBlock *, unsigned> &BlockOrder) {
+    assert(Values.size() > 1 && Blocks.size() > 1 &&
+           "Modelling PHI with less than 2 values");
+    auto ComesBefore = [BlockOrder](const BasicBlock *BB1,
+                                    const BasicBlock *BB2) {
+      return BlockOrder.lookup(BB1) < BlockOrder.lookup(BB2);
+    };
+    assert(llvm::is_sorted(Blocks, ComesBefore));
+    int C = 0;
+    llvm::for_each(Values, [&C, this](const Value *V) {
+      if (!isa<UndefValue>(V)) {
+        const Instruction *I = cast<Instruction>(V);
+        assert(I->getParent() == this->Blocks[C]);
+      }
+      C++;
+    });
+  }
   /// Create a PHI from an array of incoming values and incoming blocks.
-  template <typename VArray, typename BArray>
-  ModelledPHI(const VArray &V, const BArray &B) {
+  ModelledPHI(SmallVectorImpl<Instruction *> &V,
+              SmallSetVector<BasicBlock *, 4> &B,
+              const DenseMap<const BasicBlock *, unsigned> &BlockOrder) {
+    // The Values and Blocks are already ordered by the caller.
     llvm::copy(V, std::back_inserter(Values));
     llvm::copy(B, std::back_inserter(Blocks));
+    verifyModelledPHI(BlockOrder);
   }
 
   /// Create a PHI from [I[OpNum] for I in Insts].
-  template <typename BArray>
-  ModelledPHI(ArrayRef<Instruction *> Insts, unsigned OpNum, const BArray &B) {
+  /// TODO: Figure out a way to verifyModelledPHI in this constructor.
+  ModelledPHI(ArrayRef<Instruction *> Insts, unsigned OpNum,
+              SmallSetVector<BasicBlock *, 4> &B) {
     llvm::copy(B, std::back_inserter(Blocks));
     for (auto *I : Insts)
       Values.push_back(I->getOperand(OpNum));
@@ -297,7 +329,8 @@ class ModelledPHI {
 
   // Hash functor
   unsigned hash() const {
-    return (unsigned)hash_combine_range(Values.begin(), Values.end());
+    // Hashing is deterministic because Values are saved in a specific order.
+    return (unsigned)hash_combine_range(Values.begin(), Values.end());
   }
 
   bool operator==(const ModelledPHI &Other) const {
@@ -566,7 +599,7 @@ class ValueTable {
 
 class GVNSink {
 public:
-  GVNSink() = default;
+  GVNSink() {}
 
   bool run(Function &F) {
     LLVM_DEBUG(dbgs() << "GVNSink: running on function @" << F.getName()
@@ -575,6 +608,16 @@ class GVNSink {
     unsigned NumSunk = 0;
     ReversePostOrderTraversal<Function *> RPOT(&F);
     VN.setReachableBBs(BasicBlocksSet(RPOT.begin(), RPOT.end()));
+    // Populate the reverse post-order so basic blocks can be ordered
+    // deterministically. Any arbitrary ordering works here as long as it is
+    // deterministic. The node ordering of newly created basic blocks is
+    // irrelevant because RPOT (for computing sinkable candidates) is also
+    // obtained ahead of time and only its order is relevant for this pass.
+    unsigned NodeOrdering = 0;
+    RPOTOrder[*RPOT.begin()] = ++NodeOrdering;
+    for (auto *BB : RPOT)
+      if (!pred_empty(BB))
+        RPOTOrder[BB] = ++NodeOrdering;
     for (auto *N : RPOT)
       NumSunk += sinkBB(N);
 
@@ -583,6 +626,7 @@ class GVNSink {
 
 private:
   ValueTable VN;
+  DenseMap<const BasicBlock *, unsigned> RPOTOrder;
 
   bool shouldAvoidSinkingInstruction(Instruction *I) {
     // These instructions may change or break semantics if moved.
@@ -603,7 +647,7 @@ class GVNSink {
   void analyzeInitialPHIs(BasicBlock *BB, ModelledPHISet &PHIs,
                           SmallPtrSetImpl<Value *> &PHIContents) {
     for (PHINode &PN : BB->phis()) {
-      auto MPHI = ModelledPHI(&PN);
+      auto MPHI = ModelledPHI(&PN, RPOTOrder);
       PHIs.insert(MPHI);
       for (auto *V : MPHI.getValues())
         PHIContents.insert(V);
@@ -691,7 +735,7 @@ GVNSink::analyzeInstructionForSinking(LockstepReverseIterator &LRI,
   }
 
   // The sunk instruction's results.
-  ModelledPHI NewPHI(NewInsts, ActivePreds);
+  ModelledPHI NewPHI(NewInsts, ActivePreds, RPOTOrder);
 
   // Does sinking this instruction render previous PHIs redundant?
   if (NeededPHIs.erase(NewPHI))
@@ -766,6 +810,9 @@ unsigned GVNSink::sinkBB(BasicBlock *BBEnd) {
                     BBEnd->printAsOperand(dbgs()); dbgs() << "\n");
   SmallVector<BasicBlock *, 4> Preds;
   for (auto *B : predecessors(BBEnd)) {
+    // Bail out on basic blocks without a predecessor (PR42346).
+    if (!RPOTOrder.count(B))
+      return 0;
     auto *T = B->getTerminator();
     if (isa<BranchInst>(T) || isa<SwitchInst>(T))
       Preds.push_back(B);
@@ -774,7 +821,11 @@ unsigned GVNSink::sinkBB(BasicBlock *BBEnd) {
   }
   if (Preds.size() < 2)
     return 0;
-  llvm::sort(Preds);
+  auto ComesBefore = [this](const BasicBlock *BB1, const BasicBlock *BB2) {
+    return RPOTOrder.lookup(BB1) < RPOTOrder.lookup(BB2);
+  };
+  // Sort in a deterministic order.
+  llvm::sort(Preds, ComesBefore);
 
   unsigned NumOrigPreds = Preds.size();
   // We can only sink instructions through unconditional branches.
@@ -889,5 +940,6 @@ PreservedAnalyses GVNSinkPass::run(Function &F, FunctionAnalysisManager &AM) {
   GVNSink G;
   if (!G.run(F))
     return PreservedAnalyses::all();
+
   return PreservedAnalyses::none();
 }
diff --git a/llvm/test/Transforms/GVNSink/int_sideeffect.ll b/llvm/test/Transforms/GVNSink/int_sideeffect.ll
index 3cc54e84f17c1..9a3bc062dd946 100644
--- a/llvm/test/Transforms/GVNSink/int_sideeffect.ll
+++ b/llvm/test/Transforms/GVNSink/int_sideeffect.ll
@@ -28,3 +28,29 @@ if.end:
   ret float %phi
 }
 
+; CHECK-LABEL: scalarsSinkingReverse
+; CHECK-NOT: fmul
+; CHECK: = phi
+; CHECK: = fmul
+define float @scalarsSinkingReverse(float %d, float %m, float %a, i1 %cmp) {
+; This test is just a reverse (graph mirror) of the test above to ensure
+; GVNSink doesn't depend on the order of branches.
+entry:
+  br i1 %cmp, label %if.then, label %if.else
+
+if.then:
+  %add = fadd float %m, %a
+  %mul1 = fmul float %add, %d
+  br label %if.end
+
+if.else:
+  call void @llvm.sideeffect()
+  %sub = fsub float %m, %a
+  %mul0 = fmul float %sub, %d
+  br label %if.end
+
+if.end:
+  %phi = phi float [ %mul1, %if.then ], [ %mul0, %if.else ]
+  ret float %phi
+}
+
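Note for readers outside the LLVM tree: the core of the patch is replacing
pointer-value comparisons (whose results depend on allocation addresses and
therefore vary from run to run) with comparisons of a precomputed reverse
post-order index. The following is a minimal standalone sketch of that idea,
not part of the patch; the Block type and the Order map are hypothetical
stand-ins for llvm::BasicBlock and the patch's RPOTOrder.

#include <algorithm>
#include <cstdio>
#include <unordered_map>
#include <vector>

// Hypothetical stand-in for llvm::BasicBlock; only the name matters here.
struct Block {
  const char *Name;
};

int main() {
  Block Entry{"entry"}, Then{"if.then"}, Else{"if.else"};

  // Deterministic ordering assigned once up front, analogous to RPOTOrder in
  // GVNSink::run(): each reachable block gets its reverse post-order index.
  std::unordered_map<const Block *, unsigned> Order{
      {&Entry, 1}, {&Then, 2}, {&Else, 3}};

  std::vector<const Block *> Preds{&Else, &Then};

  // The old behavior, llvm::sort(Preds), compared the pointers themselves,
  // so the result depended on where the blocks happened to be allocated.

  // The new behavior: compare the precomputed indices instead.
  std::sort(Preds.begin(), Preds.end(),
            [&Order](const Block *L, const Block *R) {
              return Order.at(L) < Order.at(R);
            });

  for (const Block *P : Preds)
    std::printf("%s\n", P->Name); // Always if.then before if.else.
}

Because a PHI node never lists the same incoming block twice, comparing only
the blocks (as the patch's ComesBefore lambdas do) yields a strict total order
and ties cannot occur.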