diff --git a/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp b/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
index f5ae204426170..df1f6fddeba60 100644
--- a/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
+++ b/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
@@ -2286,8 +2286,7 @@ void CallsiteContextGraph<DerivedCCG, FuncTy,
     std::vector<CallInfo> AllCalls;
     AllCalls.reserve(Node->MatchingCalls.size() + 1);
     AllCalls.push_back(Node->Call);
-    AllCalls.insert(AllCalls.end(), Node->MatchingCalls.begin(),
-                    Node->MatchingCalls.end());
+    llvm::append_range(AllCalls, Node->MatchingCalls);
 
     // First see if we can partition the calls by callee function, creating new
     // nodes to host each set of calls calling the same callees. This is
@@ -2468,9 +2467,8 @@ bool CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::partitionCallsByCallee(
     // The first call becomes the primary call for this caller node, and the
     // rest go in the matching calls list.
     Info->Node->setCall(Info->Calls.front());
-    Info->Node->MatchingCalls.insert(Info->Node->MatchingCalls.end(),
-                                     Info->Calls.begin() + 1,
-                                     Info->Calls.end());
+    llvm::append_range(Info->Node->MatchingCalls,
+                       llvm::drop_begin(Info->Calls));
     // Save the primary call to node correspondence so that we can update
     // the NonAllocationCallToContextNodeMap, which is being iterated in the
     // caller of this function.
@@ -4117,8 +4115,7 @@ bool CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::assignFunctions() {
       // Ignore original Node if we moved all of its contexts to clones.
       if (!Node->emptyContextIds())
         ClonesWorklist.push_back(Node);
-      ClonesWorklist.insert(ClonesWorklist.end(), Node->Clones.begin(),
-                            Node->Clones.end());
+      llvm::append_range(ClonesWorklist, Node->Clones);
 
       // Now walk through all of the clones of this callsite Node that we need,
       // and determine the assignment to a corresponding clone of the current
diff --git a/llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp b/llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp
index 82434680b8f23..938aab5879044 100644
--- a/llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp
+++ b/llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp
@@ -399,7 +399,7 @@ struct ThreadingPath {
   void push_back(BasicBlock *BB) { Path.push_back(BB); }
   void push_front(BasicBlock *BB) { Path.push_front(BB); }
   void appendExcludingFirst(const PathType &OtherPath) {
-    Path.insert(Path.end(), OtherPath.begin() + 1, OtherPath.end());
+    llvm::append_range(Path, llvm::drop_begin(OtherPath));
   }
 
   void print(raw_ostream &OS) const {
diff --git a/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp b/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp
index 4f7956514b7b5..4c6f6f12d7138 100644
--- a/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp
+++ b/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp
@@ -3641,14 +3641,12 @@ static bool unswitchLoop(Loop &L, DominatorTree &DT, LoopInfo &LI,
     }
     // Next check all loops nested within L.
     SmallVector<Loop *> Worklist;
-    Worklist.insert(Worklist.end(), L->getSubLoops().begin(),
-                    L->getSubLoops().end());
+    llvm::append_range(Worklist, L->getSubLoops());
     while (!Worklist.empty()) {
       auto *CurLoop = Worklist.pop_back_val();
       if (!PSI->isColdBlock(CurLoop->getHeader(), BFI))
         return false;
-      Worklist.insert(Worklist.end(), CurLoop->getSubLoops().begin(),
-                      CurLoop->getSubLoops().end());
+      llvm::append_range(Worklist, CurLoop->getSubLoops());
     }
     return true;
   };
diff --git a/llvm/lib/Transforms/Utils/CodeLayout.cpp b/llvm/lib/Transforms/Utils/CodeLayout.cpp
index baaad8bb48f33..c76b3afef50c2 100644
--- a/llvm/lib/Transforms/Utils/CodeLayout.cpp
+++ b/llvm/lib/Transforms/Utils/CodeLayout.cpp
@@ -387,7 +387,7 @@ struct ChainEdge {
   void appendJump(JumpT *Jump) { Jumps.push_back(Jump); }
 
   void moveJumps(ChainEdge *Other) {
-    Jumps.insert(Jumps.end(), Other->Jumps.begin(), Other->Jumps.end());
+    llvm::append_range(Jumps, Other->Jumps);
     Other->Jumps.clear();
     Other->Jumps.shrink_to_fit();
   }
diff --git a/llvm/lib/Transforms/Utils/SampleProfileInference.cpp b/llvm/lib/Transforms/Utils/SampleProfileInference.cpp
index 54d46117729c9..53bcaa6d3df03 100644
--- a/llvm/lib/Transforms/Utils/SampleProfileInference.cpp
+++ b/llvm/lib/Transforms/Utils/SampleProfileInference.cpp
@@ -672,8 +672,8 @@ class FlowAdjuster {
 
     // Concatenate the two paths
     std::vector<FlowJump *> Result;
-    Result.insert(Result.end(), ForwardPath.begin(), ForwardPath.end());
-    Result.insert(Result.end(), BackwardPath.begin(), BackwardPath.end());
+    llvm::append_range(Result, ForwardPath);
+    llvm::append_range(Result, BackwardPath);
     return Result;
   }