[BOLT][NFC] Use range-based STL wrappers

Replace `std::` algorithms that take begin/end iterator pairs with their `llvm::`
counterparts that accept ranges.

Reviewed By: rafauler

Differential Revision: https://reviews.llvm.org/D128154
Amir Ayupov 2022-06-23 22:15:47 -07:00 committed by Amir Ayupov
parent 9ffe1b0a79
commit d2c8769936
38 changed files with 360 additions and 437 deletions
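
The patch is mechanical: every wrapper it uses (`llvm::find`, `llvm::is_contained`, `llvm::sort`, `llvm::transform`, `llvm::upper_bound`, ...) lives in `llvm/ADT/STLExtras.h` and simply forwards to the corresponding `std::` algorithm over the range's begin/end. A minimal standalone sketch of the pattern, using illustrative functions that are not part of the patch:

```cpp
#include "llvm/ADT/STLExtras.h" // llvm::is_contained, llvm::find, llvm::sort

#include <algorithm>
#include <functional>
#include <vector>

// Before: iterator-pair form.
static bool containsOld(const std::vector<int> &V, int X) {
  return std::find(V.begin(), V.end(), X) != V.end();
}

// After: range form, same behavior with less boilerplate.
static bool containsNew(const std::vector<int> &V, int X) {
  return llvm::is_contained(V, X);
}

// Before: iterator-pair sort with a custom comparator.
static void sortDescOld(std::vector<int> &V) {
  std::sort(V.begin(), V.end(), std::greater<int>());
}

// After: llvm::sort forwards to std::sort (and can add extra verification
// in expensive-checks builds).
static void sortDescNew(std::vector<int> &V) {
  llvm::sort(V, std::greater<int>());
}
```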

View File

@ -634,14 +634,12 @@ public:
/// Test if BB is a predecessor of this block.
bool isPredecessor(const BinaryBasicBlock *BB) const {
auto Itr = std::find(Predecessors.begin(), Predecessors.end(), BB);
return Itr != Predecessors.end();
return llvm::is_contained(Predecessors, BB);
}
/// Test if BB is a successor of this block.
bool isSuccessor(const BinaryBasicBlock *BB) const {
auto Itr = std::find(Successors.begin(), Successors.end(), BB);
return Itr != Successors.end();
return llvm::is_contained(Successors, BB);
}
/// Test if this BB has a valid execution count.

View File

@ -112,7 +112,7 @@ public:
bool nameStartsWith(StringRef Prefix) const;
bool hasSymbol(const MCSymbol *Symbol) const {
return std::find(Symbols.begin(), Symbols.end(), Symbol) != Symbols.end();
return llvm::is_contained(Symbols, Symbol);
}
bool isAbsolute() const;

View File

@ -716,9 +716,8 @@ private:
BB->setOffset(Offset);
BasicBlockOffsets.emplace_back(Offset, BB);
assert(std::is_sorted(BasicBlockOffsets.begin(), BasicBlockOffsets.end(),
CompareBasicBlockOffsets()) &&
std::is_sorted(begin(), end()));
assert(llvm::is_sorted(BasicBlockOffsets, CompareBasicBlockOffsets()) &&
llvm::is_sorted(blocks()));
return BB;
}

View File

@ -544,7 +544,7 @@ BinaryBasicBlock::getBranchStats(const BinaryBasicBlock *Succ) const {
}
if (TotalCount > 0) {
auto Itr = std::find(Successors.begin(), Successors.end(), Succ);
auto Itr = llvm::find(Successors, Succ);
assert(Itr != Successors.end());
const BinaryBranchInfo &BI = BranchInfo[Itr - Successors.begin()];
if (BI.Count && BI.Count != COUNT_NO_PROFILE) {

View File

@ -718,9 +718,8 @@ void BinaryContext::skipMarkedFragments() {
BF->setSimple(false);
BF->setHasSplitJumpTable(true);
std::for_each(BF->Fragments.begin(), BF->Fragments.end(), addToWorklist);
std::for_each(BF->ParentFragments.begin(), BF->ParentFragments.end(),
addToWorklist);
llvm::for_each(BF->Fragments, addToWorklist);
llvm::for_each(BF->ParentFragments, addToWorklist);
}
if (!FragmentsToSkip.empty())
errs() << "BOLT-WARNING: skipped " << FragmentsToSkip.size() << " function"
@ -1059,10 +1058,9 @@ void BinaryContext::generateSymbolHashes() {
// First check if a non-anonymous alias exists and move it to the front.
if (BD.getSymbols().size() > 1) {
auto Itr = std::find_if(BD.getSymbols().begin(), BD.getSymbols().end(),
[&](const MCSymbol *Symbol) {
return !isInternalSymbolName(Symbol->getName());
});
auto Itr = llvm::find_if(BD.getSymbols(), [&](const MCSymbol *Symbol) {
return !isInternalSymbolName(Symbol->getName());
});
if (Itr != BD.getSymbols().end()) {
size_t Idx = std::distance(BD.getSymbols().begin(), Itr);
std::swap(BD.getSymbols()[0], BD.getSymbols()[Idx]);
@ -1224,8 +1222,7 @@ void BinaryContext::foldFunction(BinaryFunction &ChildBF,
ChildBF.getSymbols().clear();
// Move other names the child function is known under.
std::move(ChildBF.Aliases.begin(), ChildBF.Aliases.end(),
std::back_inserter(ParentBF.Aliases));
llvm::move(ChildBF.Aliases, std::back_inserter(ParentBF.Aliases));
ChildBF.Aliases.clear();
if (HasRelocations) {
@ -1392,32 +1389,29 @@ unsigned BinaryContext::addDebugFilenameToUnit(const uint32_t DestCUID,
std::vector<BinaryFunction *> BinaryContext::getSortedFunctions() {
std::vector<BinaryFunction *> SortedFunctions(BinaryFunctions.size());
std::transform(BinaryFunctions.begin(), BinaryFunctions.end(),
SortedFunctions.begin(),
[](std::pair<const uint64_t, BinaryFunction> &BFI) {
return &BFI.second;
});
llvm::transform(BinaryFunctions, SortedFunctions.begin(),
[](std::pair<const uint64_t, BinaryFunction> &BFI) {
return &BFI.second;
});
std::stable_sort(SortedFunctions.begin(), SortedFunctions.end(),
[](const BinaryFunction *A, const BinaryFunction *B) {
if (A->hasValidIndex() && B->hasValidIndex()) {
return A->getIndex() < B->getIndex();
}
return A->hasValidIndex();
});
llvm::stable_sort(SortedFunctions,
[](const BinaryFunction *A, const BinaryFunction *B) {
if (A->hasValidIndex() && B->hasValidIndex()) {
return A->getIndex() < B->getIndex();
}
return A->hasValidIndex();
});
return SortedFunctions;
}
std::vector<BinaryFunction *> BinaryContext::getAllBinaryFunctions() {
std::vector<BinaryFunction *> AllFunctions;
AllFunctions.reserve(BinaryFunctions.size() + InjectedBinaryFunctions.size());
std::transform(BinaryFunctions.begin(), BinaryFunctions.end(),
std::back_inserter(AllFunctions),
[](std::pair<const uint64_t, BinaryFunction> &BFI) {
return &BFI.second;
});
std::copy(InjectedBinaryFunctions.begin(), InjectedBinaryFunctions.end(),
std::back_inserter(AllFunctions));
llvm::transform(BinaryFunctions, std::back_inserter(AllFunctions),
[](std::pair<const uint64_t, BinaryFunction> &BFI) {
return &BFI.second;
});
llvm::copy(InjectedBinaryFunctions, std::back_inserter(AllFunctions));
return AllFunctions;
}
@ -1494,17 +1488,15 @@ void BinaryContext::preprocessDebugInfo() {
llvm::errs() << "BOLT-WARNING: BOLT does not support mix mode binary with "
"DWARF5 and DWARF{2,3,4}.\n";
std::sort(AllRanges.begin(), AllRanges.end());
llvm::sort(AllRanges);
for (auto &KV : BinaryFunctions) {
const uint64_t FunctionAddress = KV.first;
BinaryFunction &Function = KV.second;
auto It = std::partition_point(
AllRanges.begin(), AllRanges.end(),
[=](CURange R) { return R.HighPC <= FunctionAddress; });
if (It != AllRanges.end() && It->LowPC <= FunctionAddress) {
auto It = llvm::partition_point(
AllRanges, [=](CURange R) { return R.HighPC <= FunctionAddress; });
if (It != AllRanges.end() && It->LowPC <= FunctionAddress)
Function.setDWARFUnit(It->Unit);
}
}
// Discover units with debug info that needs to be updated.
@ -2218,8 +2210,7 @@ DebugAddressRangesVector BinaryContext::translateModuleAddressRanges(
break;
const DebugAddressRangesVector FunctionRanges =
Function.getOutputAddressRanges();
std::move(std::begin(FunctionRanges), std::end(FunctionRanges),
std::back_inserter(OutputRanges));
llvm::move(FunctionRanges, std::back_inserter(OutputRanges));
std::advance(BFI, 1);
}
}
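
An aside on the binary-search replacement in the `preprocessDebugInfo` hunk above: `llvm::partition_point` is a thin range adapter over `std::partition_point`, so the rewritten call keeps the same contract, namely that the input is already partitioned by the predicate (here, sorted by `HighPC`). A small sketch under that assumption, with hypothetical names (`Range`, `findCovering`) standing in for BOLT's `CURange`/`AllRanges`:

```cpp
#include "llvm/ADT/STLExtras.h" // llvm::partition_point

#include <cstdint>
#include <vector>

// Hypothetical stand-in for the CURange struct used above.
struct Range {
  uint64_t LowPC, HighPC;
};

// Returns the range covering Addr, or nullptr. Ranges must be sorted by
// HighPC and non-overlapping, mirroring the AllRanges vector in the patch.
const Range *findCovering(const std::vector<Range> &Ranges, uint64_t Addr) {
  // First element whose HighPC is > Addr; everything before it ends too early.
  auto It = llvm::partition_point(
      Ranges, [=](const Range &R) { return R.HighPC <= Addr; });
  if (It != Ranges.end() && It->LowPC <= Addr)
    return &*It;
  return nullptr;
}
```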

View File

@ -333,8 +333,7 @@ bool BinaryEmitter::emitFunction(BinaryFunction &Function, bool EmitColdPart) {
// Only write CIE CFI insns that LLVM will not already emit
const std::vector<MCCFIInstruction> &FrameInstrs =
MAI->getInitialFrameState();
if (std::find(FrameInstrs.begin(), FrameInstrs.end(), CFIInstr) ==
FrameInstrs.end())
if (!llvm::is_contained(FrameInstrs, CFIInstr))
emitCFIInstruction(CFIInstr);
}
}
@ -1087,7 +1086,7 @@ void BinaryEmitter::emitDebugLineInfoForUnprocessedCUs() {
StmtListOffsets.push_back(*StmtList);
}
std::sort(StmtListOffsets.begin(), StmtListOffsets.end());
llvm::sort(StmtListOffsets);
// For each CU that was not processed, emit its line info as a binary blob.
for (const std::unique_ptr<DWARFUnit> &CU : BC.DwCtx->compile_units()) {
@ -1105,8 +1104,7 @@ void BinaryEmitter::emitDebugLineInfoForUnprocessedCUs() {
// Statement list ends where the next unit contribution begins, or at the
// end of the section.
auto It =
std::upper_bound(StmtListOffsets.begin(), StmtListOffsets.end(), Begin);
auto It = llvm::upper_bound(StmtListOffsets, Begin);
const uint64_t End =
It == StmtListOffsets.end() ? DebugLineContents.size() : *It;

View File

@ -281,9 +281,9 @@ BinaryFunction::getBasicBlockContainingOffset(uint64_t Offset) {
* BasicBlockOffsets.end(),
* CompareBasicBlockOffsets())));
*/
auto I = std::upper_bound(BasicBlockOffsets.begin(), BasicBlockOffsets.end(),
BasicBlockOffset(Offset, nullptr),
CompareBasicBlockOffsets());
auto I =
llvm::upper_bound(BasicBlockOffsets, BasicBlockOffset(Offset, nullptr),
CompareBasicBlockOffsets());
assert(I != BasicBlockOffsets.begin() && "first basic block not at offset 0");
--I;
BinaryBasicBlock *BB = I->second;
@ -561,10 +561,9 @@ void BinaryFunction::print(raw_ostream &OS, std::string Annotation,
std::vector<uint64_t> Indices(BB->succ_size());
std::iota(Indices.begin(), Indices.end(), 0);
if (BB->succ_size() > 2 && BB->getKnownExecutionCount()) {
std::stable_sort(Indices.begin(), Indices.end(),
[&](const uint64_t A, const uint64_t B) {
return BB->BranchInfo[B] < BB->BranchInfo[A];
});
llvm::stable_sort(Indices, [&](const uint64_t A, const uint64_t B) {
return BB->BranchInfo[B] < BB->BranchInfo[A];
});
}
ListSeparator LS;
for (unsigned I = 0; I < Indices.size(); ++I) {
@ -1718,7 +1717,7 @@ void BinaryFunction::postProcessJumpTables() {
// Remove duplicates branches. We can get a bunch of them from jump tables.
// Without doing jump table value profiling we don't have use for extra
// (duplicate) branches.
std::sort(TakenBranches.begin(), TakenBranches.end());
llvm::sort(TakenBranches);
auto NewEnd = std::unique(TakenBranches.begin(), TakenBranches.end());
TakenBranches.erase(NewEnd, TakenBranches.end());
}
@ -3003,8 +3002,7 @@ void BinaryFunction::dumpGraph(raw_ostream &OS) const {
<< "node [fontname=courier, shape=box, style=filled, colorscheme=brbg9]\n";
uint64_t Offset = Address;
for (BinaryBasicBlock *BB : BasicBlocks) {
auto LayoutPos =
std::find(BasicBlocksLayout.begin(), BasicBlocksLayout.end(), BB);
auto LayoutPos = llvm::find(BasicBlocksLayout, BB);
unsigned Layout = LayoutPos - BasicBlocksLayout.begin();
const char *ColdStr = BB->isCold() ? " (cold)" : "";
std::vector<std::string> Attrs;
@ -3187,8 +3185,7 @@ bool BinaryFunction::validateCFG() const {
}
for (const BinaryBasicBlock *LPBlock : BB->landing_pads()) {
if (std::find(LPBlock->throw_begin(), LPBlock->throw_end(), BB) ==
LPBlock->throw_end()) {
if (!llvm::is_contained(LPBlock->throwers(), BB)) {
errs() << "BOLT-ERROR: inconsistent landing pad detected in " << *this
<< ": " << BB->getName() << " is in LandingPads but not in "
<< LPBlock->getName() << " Throwers\n";
@ -3196,8 +3193,7 @@ bool BinaryFunction::validateCFG() const {
}
}
for (const BinaryBasicBlock *Thrower : BB->throwers()) {
if (std::find(Thrower->lp_begin(), Thrower->lp_end(), BB) ==
Thrower->lp_end()) {
if (!llvm::is_contained(Thrower->landing_pads(), BB)) {
errs() << "BOLT-ERROR: inconsistent thrower detected in " << *this
<< ": " << BB->getName() << " is in Throwers list but not in "
<< Thrower->getName() << " LandingPads\n";
@ -3670,7 +3666,7 @@ void BinaryFunction::updateLayout(BinaryBasicBlock *Start,
}
// Insert new blocks in the layout immediately after Start.
auto Pos = std::find(layout_begin(), layout_end(), Start);
auto Pos = llvm::find(layout(), Start);
assert(Pos != layout_end());
BasicBlockListType::iterator Begin =
std::next(BasicBlocks.begin(), getIndex(Start) + 1);
@ -4184,10 +4180,10 @@ DebugAddressRangesVector BinaryFunction::translateInputToOutputRanges(
// If the function hasn't changed return the same ranges.
if (!isEmitted()) {
OutputRanges.resize(InputRanges.size());
std::transform(InputRanges.begin(), InputRanges.end(), OutputRanges.begin(),
[](const DWARFAddressRange &Range) {
return DebugAddressRange(Range.LowPC, Range.HighPC);
});
llvm::transform(InputRanges, OutputRanges.begin(),
[](const DWARFAddressRange &Range) {
return DebugAddressRange(Range.LowPC, Range.HighPC);
});
return OutputRanges;
}
@ -4207,9 +4203,9 @@ DebugAddressRangesVector BinaryFunction::translateInputToOutputRanges(
const uint64_t InputEndOffset =
std::min(Range.HighPC - getAddress(), getSize());
auto BBI = std::upper_bound(
BasicBlockOffsets.begin(), BasicBlockOffsets.end(),
BasicBlockOffset(InputOffset, nullptr), CompareBasicBlockOffsets());
auto BBI = llvm::upper_bound(BasicBlockOffsets,
BasicBlockOffset(InputOffset, nullptr),
CompareBasicBlockOffsets());
--BBI;
do {
const BinaryBasicBlock *BB = BBI->second;
@ -4246,7 +4242,7 @@ DebugAddressRangesVector BinaryFunction::translateInputToOutputRanges(
}
// Post-processing pass to sort and merge ranges.
std::sort(OutputRanges.begin(), OutputRanges.end());
llvm::sort(OutputRanges);
DebugAddressRangesVector MergedRanges;
PrevEndAddress = 0;
for (const DebugAddressRange &Range : OutputRanges) {
@ -4315,9 +4311,9 @@ DebugLocationsVector BinaryFunction::translateInputToOutputLocationList(
}
uint64_t InputOffset = Start - getAddress();
const uint64_t InputEndOffset = std::min(End - getAddress(), getSize());
auto BBI = std::upper_bound(
BasicBlockOffsets.begin(), BasicBlockOffsets.end(),
BasicBlockOffset(InputOffset, nullptr), CompareBasicBlockOffsets());
auto BBI = llvm::upper_bound(BasicBlockOffsets,
BasicBlockOffset(InputOffset, nullptr),
CompareBasicBlockOffsets());
--BBI;
do {
const BinaryBasicBlock *BB = BBI->second;
@ -4354,9 +4350,8 @@ DebugLocationsVector BinaryFunction::translateInputToOutputLocationList(
}
// Sort and merge adjacent entries with identical location.
std::stable_sort(
OutputLL.begin(), OutputLL.end(),
[](const DebugLocationEntry &A, const DebugLocationEntry &B) {
llvm::stable_sort(
OutputLL, [](const DebugLocationEntry &A, const DebugLocationEntry &B) {
return A.LowPC < B.LowPC;
});
DebugLocationsVector MergedLL;

View File

@ -313,10 +313,10 @@ void DebugAddrWriter::AddressForDWOCU::dump() {
std::vector<IndexAddressPair> SortedMap(indexToAddressBegin(),
indexToAdddessEnd());
// Sorting address in increasing order of indices.
std::sort(SortedMap.begin(), SortedMap.end(),
[](const IndexAddressPair &A, const IndexAddressPair &B) {
return A.first < B.first;
});
llvm::sort(SortedMap,
[](const IndexAddressPair &A, const IndexAddressPair &B) {
return A.first < B.first;
});
for (auto &Pair : SortedMap)
dbgs() << Twine::utohexstr(Pair.second) << "\t" << Pair.first << "\n";
}
@ -375,10 +375,10 @@ AddressSectionBuffer DebugAddrWriter::finalize() {
std::vector<IndexAddressPair> SortedMap(AM->second.indexToAddressBegin(),
AM->second.indexToAdddessEnd());
// Sorting address in increasing order of indices.
std::sort(SortedMap.begin(), SortedMap.end(),
[](const IndexAddressPair &A, const IndexAddressPair &B) {
return A.first < B.first;
});
llvm::sort(SortedMap,
[](const IndexAddressPair &A, const IndexAddressPair &B) {
return A.first < B.first;
});
uint8_t AddrSize = CU->getAddressByteSize();
uint32_t Counter = 0;
@ -449,10 +449,10 @@ AddressSectionBuffer DebugAddrWriterDwarf5::finalize() {
AMIter->second.indexToAddressBegin(),
AMIter->second.indexToAdddessEnd());
// Sorting address in increasing order of indices.
std::sort(SortedMap.begin(), SortedMap.end(),
[](const IndexAddressPair &A, const IndexAddressPair &B) {
return A.first < B.first;
});
llvm::sort(SortedMap,
[](const IndexAddressPair &A, const IndexAddressPair &B) {
return A.first < B.first;
});
// Writing out Header
const uint32_t Length = SortedMap.size() * AddrSize + 4;
support::endian::write(AddressStream, Length, Endian);
@ -841,22 +841,20 @@ std::string SimpleBinaryPatcher::patchBinary(StringRef BinaryContents) {
CUOffsetMap DebugInfoBinaryPatcher::computeNewOffsets(DWARFContext &DWCtx,
bool IsDWOContext) {
CUOffsetMap CUMap;
std::sort(DebugPatches.begin(), DebugPatches.end(),
[](const UniquePatchPtrType &V1, const UniquePatchPtrType &V2) {
if (V1.get()->Offset == V2.get()->Offset) {
if (V1->Kind == DebugPatchKind::NewDebugEntry &&
V2->Kind == DebugPatchKind::NewDebugEntry)
return reinterpret_cast<const NewDebugEntry *>(V1.get())
->CurrentOrder <
reinterpret_cast<const NewDebugEntry *>(V2.get())
->CurrentOrder;
llvm::sort(DebugPatches, [](const UniquePatchPtrType &V1,
const UniquePatchPtrType &V2) {
if (V1.get()->Offset == V2.get()->Offset) {
if (V1->Kind == DebugPatchKind::NewDebugEntry &&
V2->Kind == DebugPatchKind::NewDebugEntry)
return reinterpret_cast<const NewDebugEntry *>(V1.get())->CurrentOrder <
reinterpret_cast<const NewDebugEntry *>(V2.get())->CurrentOrder;
// This is a case where we are modifying first entry of next
// DIE, and adding a new one.
return V1->Kind == DebugPatchKind::NewDebugEntry;
}
return V1.get()->Offset < V2.get()->Offset;
});
// This is a case where we are modifying first entry of next
// DIE, and adding a new one.
return V1->Kind == DebugPatchKind::NewDebugEntry;
}
return V1.get()->Offset < V2.get()->Offset;
});
DWARFUnitVector::compile_unit_range CompileUnits =
IsDWOContext ? DWCtx.dwo_compile_units() : DWCtx.compile_units();

View File

@ -107,7 +107,7 @@ void DynoStats::print(raw_ostream &OS, const DynoStats *Other,
SortedHistogram.emplace_back(Stat.second.first, Stat.first);
// Sort using lexicographic ordering
std::sort(SortedHistogram.begin(), SortedHistogram.end());
llvm::sort(SortedHistogram);
// Dump in ascending order: Start with Opcode with Highest execution
// count.

View File

@ -657,7 +657,7 @@ std::vector<char> CFIReaderWriter::generateEHFrameHeader(
std::map<uint64_t, uint64_t> PCToFDE;
// Presort array for binary search.
std::sort(FailedAddresses.begin(), FailedAddresses.end());
llvm::sort(FailedAddresses);
// Initialize PCToFDE using NewEHFrame.
for (dwarf::FrameEntry &Entry : NewEHFrame.entries()) {
@ -683,9 +683,7 @@ std::vector<char> CFIReaderWriter::generateEHFrameHeader(
};
LLVM_DEBUG(dbgs() << "BOLT-DEBUG: new .eh_frame contains "
<< std::distance(NewEHFrame.entries().begin(),
NewEHFrame.entries().end())
<< " entries\n");
<< llvm::size(NewEHFrame.entries()) << " entries\n");
// Add entries from the original .eh_frame corresponding to the functions
// that we did not update.
@ -707,9 +705,7 @@ std::vector<char> CFIReaderWriter::generateEHFrameHeader(
};
LLVM_DEBUG(dbgs() << "BOLT-DEBUG: old .eh_frame contains "
<< std::distance(OldEHFrame.entries().begin(),
OldEHFrame.entries().end())
<< " entries\n");
<< llvm::size(OldEHFrame.entries()) << " entries\n");
// Generate a new .eh_frame_hdr based on the new map.

View File

@ -1420,10 +1420,10 @@ void PrintProgramStats::runOnFunctions(BinaryContext &BC) {
if (ProfiledFunctions.size() > 10) {
if (opts::Verbosity >= 1) {
outs() << "BOLT-INFO: top called functions are:\n";
std::sort(ProfiledFunctions.begin(), ProfiledFunctions.end(),
[](const BinaryFunction *A, const BinaryFunction *B) {
return B->getExecutionCount() < A->getExecutionCount();
});
llvm::sort(ProfiledFunctions,
[](const BinaryFunction *A, const BinaryFunction *B) {
return B->getExecutionCount() < A->getExecutionCount();
});
auto SFI = ProfiledFunctions.begin();
auto SFIend = ProfiledFunctions.end();
for (unsigned I = 0u; I < opts::TopCalledLimit && SFI != SFIend;
@ -1433,8 +1433,7 @@ void PrintProgramStats::runOnFunctions(BinaryContext &BC) {
}
if (!opts::PrintSortedBy.empty() &&
std::find(opts::PrintSortedBy.begin(), opts::PrintSortedBy.end(),
DynoStats::FIRST_DYNO_STAT) == opts::PrintSortedBy.end()) {
!llvm::is_contained(opts::PrintSortedBy, DynoStats::FIRST_DYNO_STAT)) {
std::vector<const BinaryFunction *> Functions;
std::map<const BinaryFunction *, DynoStats> Stats;
@ -1448,24 +1447,22 @@ void PrintProgramStats::runOnFunctions(BinaryContext &BC) {
}
const bool SortAll =
std::find(opts::PrintSortedBy.begin(), opts::PrintSortedBy.end(),
DynoStats::LAST_DYNO_STAT) != opts::PrintSortedBy.end();
llvm::is_contained(opts::PrintSortedBy, DynoStats::LAST_DYNO_STAT);
const bool Ascending =
opts::DynoStatsSortOrderOpt == opts::DynoStatsSortOrder::Ascending;
if (SortAll) {
std::stable_sort(Functions.begin(), Functions.end(),
[Ascending, &Stats](const BinaryFunction *A,
const BinaryFunction *B) {
return Ascending ? Stats.at(A) < Stats.at(B)
: Stats.at(B) < Stats.at(A);
});
llvm::stable_sort(Functions,
[Ascending, &Stats](const BinaryFunction *A,
const BinaryFunction *B) {
return Ascending ? Stats.at(A) < Stats.at(B)
: Stats.at(B) < Stats.at(A);
});
} else {
std::stable_sort(
Functions.begin(), Functions.end(),
[Ascending, &Stats](const BinaryFunction *A,
const BinaryFunction *B) {
llvm::stable_sort(
Functions, [Ascending, &Stats](const BinaryFunction *A,
const BinaryFunction *B) {
const DynoStats &StatsA = Stats.at(A);
const DynoStats &StatsB = Stats.at(B);
return Ascending ? StatsA.lessThan(StatsB, opts::PrintSortedBy)
@ -1564,11 +1561,11 @@ void PrintProgramStats::runOnFunctions(BinaryContext &BC) {
}
if (!SuboptimalFuncs.empty()) {
std::sort(SuboptimalFuncs.begin(), SuboptimalFuncs.end(),
[](const BinaryFunction *A, const BinaryFunction *B) {
return A->getKnownExecutionCount() / A->getSize() >
B->getKnownExecutionCount() / B->getSize();
});
llvm::sort(SuboptimalFuncs,
[](const BinaryFunction *A, const BinaryFunction *B) {
return A->getKnownExecutionCount() / A->getSize() >
B->getKnownExecutionCount() / B->getSize();
});
outs() << "BOLT-INFO: " << SuboptimalFuncs.size()
<< " functions have "

View File

@ -801,8 +801,7 @@ private:
}
// Remove chain From from the list of active chains
auto Iter = std::remove(HotChains.begin(), HotChains.end(), From);
HotChains.erase(Iter, HotChains.end());
llvm::erase_value(HotChains, From);
// Invalidate caches
for (std::pair<Chain *, Edge *> EdgeIter : Into->edges())
@ -818,26 +817,23 @@ private:
SortedChains.push_back(&Chain);
// Sorting chains by density in decreasing order
std::stable_sort(
SortedChains.begin(), SortedChains.end(),
[](const Chain *C1, const Chain *C2) {
// Original entry point to the front
if (C1->isEntryPoint() != C2->isEntryPoint()) {
if (C1->isEntryPoint())
return true;
if (C2->isEntryPoint())
return false;
}
const double D1 = C1->density();
const double D2 = C2->density();
if (D1 != D2)
return D1 > D2;
// Making the order deterministic
return C1->id() < C2->id();
llvm::stable_sort(SortedChains, [](const Chain *C1, const Chain *C2) {
// Original entry point to the front
if (C1->isEntryPoint() != C2->isEntryPoint()) {
if (C1->isEntryPoint())
return true;
if (C2->isEntryPoint())
return false;
}
);
const double D1 = C1->density();
const double D2 = C2->density();
if (D1 != D2)
return D1 > D2;
// Making the order deterministic
return C1->id() < C2->id();
});
// Collect the basic blocks in the order specified by their chains
Order.reserve(BF.layout_size());
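
The `llvm::erase_value` call above folds the classic remove+erase idiom into one statement (later LLVM revisions rename it to `llvm::erase`). A minimal sketch of the equivalence on plain `int`s rather than BOLT's `Chain *` elements:

```cpp
#include "llvm/ADT/STLExtras.h" // llvm::erase_value

#include <algorithm>
#include <vector>

// Before: erase-remove idiom, removes every element equal to From.
static void removeOld(std::vector<int> &HotChains, int From) {
  auto Iter = std::remove(HotChains.begin(), HotChains.end(), From);
  HotChains.erase(Iter, HotChains.end());
}

// After: single call with the same effect.
static void removeNew(std::vector<int> &HotChains, int From) {
  llvm::erase_value(HotChains, From);
}
```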

View File

@ -86,7 +86,7 @@ namespace {
void freezeClusters(const CallGraph &Cg, std::vector<Cluster> &Clusters) {
uint32_t TotalSize = 0;
std::sort(Clusters.begin(), Clusters.end(), compareClustersDensity);
llvm::sort(Clusters, compareClustersDensity);
for (Cluster &C : Clusters) {
uint32_t NewSize = TotalSize + C.size();
if (NewSize > FrozenPages * HugePageSize)
@ -150,13 +150,12 @@ std::vector<Cluster> clusterize(const CallGraph &Cg) {
for (Cluster &Cluster : Clusters)
FuncCluster[Cluster.targets().front()] = &Cluster;
std::sort(SortedFuncs.begin(), SortedFuncs.end(),
[&](const NodeId F1, const NodeId F2) {
const CallGraph::Node &Func1 = Cg.getNode(F1);
const CallGraph::Node &Func2 = Cg.getNode(F2);
return Func1.samples() * Func2.size() > // TODO: is this correct?
Func2.samples() * Func1.size();
});
llvm::sort(SortedFuncs, [&](const NodeId F1, const NodeId F2) {
const CallGraph::Node &Func1 = Cg.getNode(F1);
const CallGraph::Node &Func2 = Cg.getNode(F2);
return Func1.samples() * Func2.size() > // TODO: is this correct?
Func2.samples() * Func1.size();
});
// Process each function, and consider merging its cluster with the
// one containing its most likely predecessor.
@ -234,8 +233,7 @@ std::vector<Cluster> clusterize(const CallGraph &Cg) {
Visited.insert(Cluster);
}
std::sort(SortedClusters.begin(), SortedClusters.end(),
compareClustersDensity);
llvm::sort(SortedClusters, compareClustersDensity);
return SortedClusters;
}
@ -251,9 +249,9 @@ std::vector<Cluster> randomClusters(const CallGraph &Cg) {
Clusters.emplace_back(F, Cg.getNode(F));
}
std::sort(
Clusters.begin(), Clusters.end(),
[](const Cluster &A, const Cluster &B) { return A.size() < B.size(); });
llvm::sort(Clusters, [](const Cluster &A, const Cluster &B) {
return A.size() < B.size();
});
auto pickMergeCluster = [&Clusters](const size_t Idx) {
size_t MaxIdx = Idx + 1;

View File

@ -245,7 +245,7 @@ public:
// Making sure the comparison is deterministic
return L->Id < R->Id;
};
std::stable_sort(HotChains.begin(), HotChains.end(), DensityComparator);
llvm::stable_sort(HotChains, DensityComparator);
// Return the set of clusters that are left, which are the ones that
// didn't get merged (so their first func is its original func)
@ -453,9 +453,9 @@ private:
}
// Sort the pairs by the weight in reverse order
std::sort(
ArcsToMerge.begin(), ArcsToMerge.end(),
[](const Arc *L, const Arc *R) { return L->weight() > R->weight(); });
llvm::sort(ArcsToMerge, [](const Arc *L, const Arc *R) {
return L->weight() > R->weight();
});
// Merge the pairs of chains
for (const Arc *Arc : ArcsToMerge) {
@ -567,8 +567,7 @@ private:
Into->Score = score(Into);
// Remove chain From From the list of active chains
auto it = std::remove(HotChains.begin(), HotChains.end(), From);
HotChains.erase(it, HotChains.end());
llvm::erase_value(HotChains, From);
}
private:

View File

@ -479,11 +479,10 @@ void IdenticalCodeFolding::runOnFunctions(BinaryContext &BC) {
// Fold functions. Keep the order consistent across invocations with
// different options.
std::stable_sort(Twins.begin(), Twins.end(),
[](const BinaryFunction *A, const BinaryFunction *B) {
return A->getFunctionNumber() <
B->getFunctionNumber();
});
llvm::stable_sort(
Twins, [](const BinaryFunction *A, const BinaryFunction *B) {
return A->getFunctionNumber() < B->getFunctionNumber();
});
BinaryFunction *ParentBF = Twins[0];
for (unsigned I = 1; I < Twins.size(); ++I) {

View File

@ -238,17 +238,16 @@ IndirectCallPromotion::getCallTargets(BinaryBasicBlock &BB,
}
// Sort by symbol then addr.
std::sort(Targets.begin(), Targets.end(),
[](const Callsite &A, const Callsite &B) {
if (A.To.Sym && B.To.Sym)
return A.To.Sym < B.To.Sym;
else if (A.To.Sym && !B.To.Sym)
return true;
else if (!A.To.Sym && B.To.Sym)
return false;
else
return A.To.Addr < B.To.Addr;
});
llvm::sort(Targets, [](const Callsite &A, const Callsite &B) {
if (A.To.Sym && B.To.Sym)
return A.To.Sym < B.To.Sym;
else if (A.To.Sym && !B.To.Sym)
return true;
else if (!A.To.Sym && B.To.Sym)
return false;
else
return A.To.Addr < B.To.Addr;
});
// Targets may contain multiple entries to the same target, but using
// different indices. Their profile will report the same number of branches
@ -294,21 +293,18 @@ IndirectCallPromotion::getCallTargets(BinaryBasicBlock &BB,
// Sort by target count, number of indices in case of jump table, and
// mispredicts. We prioritize targets with high count, small number of indices
// and high mispredicts. Break ties by selecting targets with lower addresses.
std::stable_sort(Targets.begin(), Targets.end(),
[](const Callsite &A, const Callsite &B) {
if (A.Branches != B.Branches)
return A.Branches > B.Branches;
if (A.JTIndices.size() != B.JTIndices.size())
return A.JTIndices.size() < B.JTIndices.size();
if (A.Mispreds != B.Mispreds)
return A.Mispreds > B.Mispreds;
return A.To.Addr < B.To.Addr;
});
llvm::stable_sort(Targets, [](const Callsite &A, const Callsite &B) {
if (A.Branches != B.Branches)
return A.Branches > B.Branches;
if (A.JTIndices.size() != B.JTIndices.size())
return A.JTIndices.size() < B.JTIndices.size();
if (A.Mispreds != B.Mispreds)
return A.Mispreds > B.Mispreds;
return A.To.Addr < B.To.Addr;
});
// Remove non-symbol targets
auto Last = std::remove_if(Targets.begin(), Targets.end(),
[](const Callsite &CS) { return !CS.To.Sym; });
Targets.erase(Last, Targets.end());
llvm::erase_if(Targets, [](const Callsite &CS) { return !CS.To.Sym; });
LLVM_DEBUG(if (BF.getJumpTable(Inst)) {
uint64_t TotalCount = 0;
@ -471,14 +467,14 @@ IndirectCallPromotion::maybeGetHotJumpTableTargets(BinaryBasicBlock &BB,
HotTarget.second = Index;
}
std::transform(
HotTargetMap.begin(), HotTargetMap.end(), std::back_inserter(HotTargets),
llvm::transform(
HotTargetMap, std::back_inserter(HotTargets),
[](const std::pair<MCSymbol *, std::pair<uint64_t, uint64_t>> &A) {
return A.second;
});
// Sort with highest counts first.
std::sort(HotTargets.rbegin(), HotTargets.rend());
llvm::sort(reverse(HotTargets));
LLVM_DEBUG({
dbgs() << "BOLT-INFO: ICP jump table hot targets:\n";
@ -566,9 +562,7 @@ IndirectCallPromotion::findCallTargetSymbols(std::vector<Callsite> &Targets,
NewTargets.push_back(Target);
std::vector<uint64_t>({JTIndex}).swap(NewTargets.back().JTIndices);
Target.JTIndices.erase(std::remove(Target.JTIndices.begin(),
Target.JTIndices.end(), JTIndex),
Target.JTIndices.end());
llvm::erase_value(Target.JTIndices, JTIndex);
// Keep fixCFG counts sane if more indices use this same target later
assert(IndicesPerTarget[Target.To.Sym] > 0 && "wrong map");
@ -581,7 +575,7 @@ IndirectCallPromotion::findCallTargetSymbols(std::vector<Callsite> &Targets,
Target.Branches -= NewTargets.back().Branches;
Target.Mispreds -= NewTargets.back().Mispreds;
}
std::copy(Targets.begin(), Targets.end(), std::back_inserter(NewTargets));
llvm::copy(Targets, std::back_inserter(NewTargets));
std::swap(NewTargets, Targets);
N = I;
@ -1168,7 +1162,7 @@ void IndirectCallPromotion::runOnFunctions(BinaryContext &BC) {
}
// Sort callsites by execution count.
std::sort(IndirectCalls.rbegin(), IndirectCalls.rend());
llvm::sort(reverse(IndirectCalls));
// Find callsites that contribute to the top "opts::ICPTopCallsites"%
// number of calls.
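
Two replacements in this file are less literal than the rest, so a short note: `llvm::sort(reverse(C))` sorts the reversed view in ascending order, leaving `C` itself descending, which matches the old `std::sort(C.rbegin(), C.rend())` (the unqualified `reverse` resolves to `llvm::reverse`, since BOLT lives in the `llvm::bolt` namespace); and `llvm::erase_if` collapses the `std::remove_if` + `erase` pair. A small self-contained sketch with made-up data:

```cpp
#include "llvm/ADT/STLExtras.h" // llvm::sort, llvm::reverse, llvm::erase_if

#include <cassert>
#include <vector>

int main() {
  std::vector<int> Counts = {3, 9, 1, 7};

  // Equivalent to std::sort(Counts.rbegin(), Counts.rend()):
  // the reversed view becomes ascending, so Counts ends up descending.
  llvm::sort(llvm::reverse(Counts));
  assert((Counts == std::vector<int>{9, 7, 3, 1}));

  // Equivalent to the remove_if/erase pair: drop entries below a threshold.
  llvm::erase_if(Counts, [](int C) { return C < 5; });
  assert((Counts == std::vector<int>{9, 7}));
  return 0;
}
```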

View File

@ -353,10 +353,10 @@ Inliner::inlineCall(BinaryBasicBlock &CallerBB,
// Add CFG edges to the basic blocks of the inlined instance.
std::vector<BinaryBasicBlock *> Successors(BB.succ_size());
std::transform(BB.succ_begin(), BB.succ_end(), Successors.begin(),
[&InlinedBBMap](const BinaryBasicBlock *BB) {
return InlinedBBMap.at(BB);
});
llvm::transform(BB.successors(), Successors.begin(),
[&InlinedBBMap](const BinaryBasicBlock *BB) {
return InlinedBBMap.at(BB);
});
if (CallerFunction.hasValidProfile() && Callee.hasValidProfile())
InlinedBB->addSuccessors(Successors.begin(), Successors.end(),
@ -397,11 +397,10 @@ bool Inliner::inlineCallsInFunction(BinaryFunction &Function) {
BinaryContext &BC = Function.getBinaryContext();
std::vector<BinaryBasicBlock *> Blocks(Function.layout().begin(),
Function.layout().end());
std::sort(Blocks.begin(), Blocks.end(),
[](const BinaryBasicBlock *BB1, const BinaryBasicBlock *BB2) {
return BB1->getKnownExecutionCount() >
BB2->getKnownExecutionCount();
});
llvm::sort(
Blocks, [](const BinaryBasicBlock *BB1, const BinaryBasicBlock *BB2) {
return BB1->getKnownExecutionCount() > BB2->getKnownExecutionCount();
});
bool DidInlining = false;
for (BinaryBasicBlock *BB : Blocks) {
@ -520,11 +519,10 @@ void Inliner::runOnFunctions(BinaryContext &BC) {
continue;
ConsideredFunctions.push_back(&Function);
}
std::sort(ConsideredFunctions.begin(), ConsideredFunctions.end(),
[](const BinaryFunction *A, const BinaryFunction *B) {
return B->getKnownExecutionCount() <
A->getKnownExecutionCount();
});
llvm::sort(ConsideredFunctions, [](const BinaryFunction *A,
const BinaryFunction *B) {
return B->getKnownExecutionCount() < A->getKnownExecutionCount();
});
for (BinaryFunction *Function : ConsideredFunctions) {
if (opts::InlineLimit && NumInlinedCallSites >= opts::InlineLimit)
break;

View File

@ -578,9 +578,8 @@ void Instrumentation::runOnFunctions(BinaryContext &BC) {
MCSymbol *Target = BC.registerNameAtAddress(
"__bolt_instr_fini", FiniSection->getAddress(), 0, 0);
auto IsLEA = [&BC](const MCInst &Inst) { return BC.MIB->isLEA64r(Inst); };
const auto LEA =
std::find_if(std::next(std::find_if(BB.rbegin(), BB.rend(), IsLEA)),
BB.rend(), IsLEA);
const auto LEA = std::find_if(
std::next(llvm::find_if(reverse(BB), IsLEA)), BB.rend(), IsLEA);
LEA->getOperand(4).setExpr(
MCSymbolRefExpr::create(Target, MCSymbolRefExpr::VK_None, *BC.Ctx));
} else {

View File

@ -89,9 +89,8 @@ LongJmpPass::createNewStub(BinaryBasicBlock &SourceBB, const MCSymbol *TgtSym,
auto registerInMap = [&](StubGroupsTy &Map) {
StubGroupTy &StubGroup = Map[TgtSym];
StubGroup.insert(
std::lower_bound(
StubGroup.begin(), StubGroup.end(),
std::make_pair(AtAddress, nullptr),
llvm::lower_bound(
StubGroup, std::make_pair(AtAddress, nullptr),
[&](const std::pair<uint64_t, BinaryBasicBlock *> &LHS,
const std::pair<uint64_t, BinaryBasicBlock *> &RHS) {
return LHS.first < RHS.first;
@ -126,8 +125,8 @@ BinaryBasicBlock *LongJmpPass::lookupStubFromGroup(
const StubGroupTy &Candidates = CandidatesIter->second;
if (Candidates.empty())
return nullptr;
auto Cand = std::lower_bound(
Candidates.begin(), Candidates.end(), std::make_pair(DotAddress, nullptr),
auto Cand = llvm::lower_bound(
Candidates, std::make_pair(DotAddress, nullptr),
[&](const std::pair<uint64_t, BinaryBasicBlock *> &LHS,
const std::pair<uint64_t, BinaryBasicBlock *> &RHS) {
return LHS.first < RHS.first;
@ -256,11 +255,11 @@ void LongJmpPass::updateStubGroups() {
for (auto &KeyVal : StubGroups) {
for (StubTy &Elem : KeyVal.second)
Elem.first = BBAddresses[Elem.second];
std::sort(KeyVal.second.begin(), KeyVal.second.end(),
[&](const std::pair<uint64_t, BinaryBasicBlock *> &LHS,
const std::pair<uint64_t, BinaryBasicBlock *> &RHS) {
return LHS.first < RHS.first;
});
llvm::sort(KeyVal.second,
[&](const std::pair<uint64_t, BinaryBasicBlock *> &LHS,
const std::pair<uint64_t, BinaryBasicBlock *> &RHS) {
return LHS.first < RHS.first;
});
}
};

View File

@ -73,10 +73,9 @@ bool LoopInversionPass::runOnFunction(BinaryFunction &BF) {
if (IsChanged) {
BinaryFunction::BasicBlockOrderType NewOrder = BF.getLayout();
std::sort(NewOrder.begin(), NewOrder.end(),
[&](BinaryBasicBlock *BB1, BinaryBasicBlock *BB2) {
return BB1->getLayoutIndex() < BB2->getLayoutIndex();
});
llvm::sort(NewOrder, [&](BinaryBasicBlock *BB1, BinaryBasicBlock *BB2) {
return BB1->getLayoutIndex() < BB2->getLayoutIndex();
});
BF.updateBasicBlockLayout(NewOrder);
}

View File

@ -207,7 +207,7 @@ std::vector<Cluster> pettisAndHansen(const CallGraph &Cg) {
for (Cluster *C : LiveClusters)
OutClusters.push_back(std::move(*C));
std::sort(OutClusters.begin(), OutClusters.end(), compareClustersDensity);
llvm::sort(OutClusters, compareClustersDensity);
return OutClusters;
}

View File

@ -197,8 +197,8 @@ void RegReAssign::rankRegisters(BinaryFunction &Function) {
}
}
std::iota(RankedRegs.begin(), RankedRegs.end(), 0); // 0, 1, 2, 3...
std::sort(RankedRegs.begin(), RankedRegs.end(),
[&](size_t A, size_t B) { return RegScore[A] > RegScore[B]; });
llvm::sort(RankedRegs,
[&](size_t A, size_t B) { return RegScore[A] > RegScore[B]; });
LLVM_DEBUG({
for (size_t Reg : RankedRegs) {

View File

@ -244,7 +244,7 @@ void PHGreedyClusterAlgorithm::initQueue(std::vector<EdgeTy> &Queue,
};
// Sort edges in increasing profile count order.
std::sort(Queue.begin(), Queue.end(), Comp);
llvm::sort(Queue, Comp);
}
void PHGreedyClusterAlgorithm::adjustQueue(std::vector<EdgeTy> &Queue,
@ -385,7 +385,7 @@ void MinBranchGreedyClusterAlgorithm::adjustQueue(std::vector<EdgeTy> &Queue,
// Sort remaining edges in increasing weight order.
Queue.swap(NewQueue);
std::sort(Queue.begin(), Queue.end(), Comp);
llvm::sort(Queue, Comp);
}
bool MinBranchGreedyClusterAlgorithm::areClustersCompatible(

View File

@ -275,8 +275,8 @@ ReorderData::sortedByFunc(BinaryContext &BC, const BinarySection &Section,
DataOrder Order = baseOrder(BC, Section);
unsigned SplitPoint = Order.size();
std::sort(
Order.begin(), Order.end(),
llvm::sort(
Order,
[&](const DataOrder::value_type &A, const DataOrder::value_type &B) {
// Total execution counts of functions referencing BD.
const uint64_t ACount = BDtoFuncCount[A.first];
@ -307,17 +307,17 @@ ReorderData::sortedByCount(BinaryContext &BC,
DataOrder Order = baseOrder(BC, Section);
unsigned SplitPoint = Order.size();
std::sort(Order.begin(), Order.end(),
[](const DataOrder::value_type &A, const DataOrder::value_type &B) {
// Weight by number of loads/data size.
const double AWeight = double(A.second) / A.first->getSize();
const double BWeight = double(B.second) / B.first->getSize();
return (AWeight > BWeight ||
(AWeight == BWeight &&
(A.first->getSize() < B.first->getSize() ||
(A.first->getSize() == B.first->getSize() &&
A.first->getAddress() < B.first->getAddress()))));
});
llvm::sort(Order, [](const DataOrder::value_type &A,
const DataOrder::value_type &B) {
// Weight by number of loads/data size.
const double AWeight = double(A.second) / A.first->getSize();
const double BWeight = double(B.second) / B.first->getSize();
return (AWeight > BWeight ||
(AWeight == BWeight &&
(A.first->getSize() < B.first->getSize() ||
(A.first->getSize() == B.first->getSize() &&
A.first->getAddress() < B.first->getAddress()))));
});
for (unsigned Idx = 0; Idx < Order.size(); ++Idx) {
if (!Order[Idx].second) {

View File

@ -292,28 +292,26 @@ void ReorderFunctions::runOnFunctions(BinaryContext &BC) {
{
std::vector<BinaryFunction *> SortedFunctions(BFs.size());
uint32_t Index = 0;
std::transform(BFs.begin(),
BFs.end(),
SortedFunctions.begin(),
[](std::pair<const uint64_t, BinaryFunction> &BFI) {
return &BFI.second;
});
std::stable_sort(SortedFunctions.begin(), SortedFunctions.end(),
[&](const BinaryFunction *A, const BinaryFunction *B) {
if (A->isIgnored())
return false;
const size_t PadA = opts::padFunction(*A);
const size_t PadB = opts::padFunction(*B);
if (!PadA || !PadB) {
if (PadA)
return true;
if (PadB)
return false;
}
return !A->hasProfile() &&
(B->hasProfile() ||
(A->getExecutionCount() > B->getExecutionCount()));
});
llvm::transform(BFs, SortedFunctions.begin(),
[](std::pair<const uint64_t, BinaryFunction> &BFI) {
return &BFI.second;
});
llvm::stable_sort(SortedFunctions, [&](const BinaryFunction *A,
const BinaryFunction *B) {
if (A->isIgnored())
return false;
const size_t PadA = opts::padFunction(*A);
const size_t PadB = opts::padFunction(*B);
if (!PadA || !PadB) {
if (PadA)
return true;
if (PadB)
return false;
}
return !A->hasProfile() &&
(B->hasProfile() ||
(A->getExecutionCount() > B->getExecutionCount()));
});
for (BinaryFunction *BF : SortedFunctions)
if (BF->hasProfile())
BF->setIndex(Index++);
@ -409,24 +407,22 @@ void ReorderFunctions::runOnFunctions(BinaryContext &BC) {
if (FuncsFile || LinkSectionsFile) {
std::vector<BinaryFunction *> SortedFunctions(BFs.size());
std::transform(BFs.begin(), BFs.end(), SortedFunctions.begin(),
[](std::pair<const uint64_t, BinaryFunction> &BFI) {
return &BFI.second;
});
llvm::transform(BFs, SortedFunctions.begin(),
[](std::pair<const uint64_t, BinaryFunction> &BFI) {
return &BFI.second;
});
// Sort functions by index.
std::stable_sort(
SortedFunctions.begin(),
SortedFunctions.end(),
[](const BinaryFunction *A, const BinaryFunction *B) {
if (A->hasValidIndex() && B->hasValidIndex())
return A->getIndex() < B->getIndex();
if (A->hasValidIndex() && !B->hasValidIndex())
return true;
if (!A->hasValidIndex() && B->hasValidIndex())
return false;
return A->getAddress() < B->getAddress();
});
llvm::stable_sort(SortedFunctions,
[](const BinaryFunction *A, const BinaryFunction *B) {
if (A->hasValidIndex() && B->hasValidIndex())
return A->getIndex() < B->getIndex();
if (A->hasValidIndex() && !B->hasValidIndex())
return true;
if (!A->hasValidIndex() && B->hasValidIndex())
return false;
return A->getAddress() < B->getAddress();
});
for (const BinaryFunction *Func : SortedFunctions) {
if (!Func->hasValidIndex())
@ -440,7 +436,7 @@ void ReorderFunctions::runOnFunctions(BinaryContext &BC) {
if (LinkSectionsFile) {
const char *Indent = "";
std::vector<StringRef> AllNames = Func->getNames();
std::sort(AllNames.begin(), AllNames.end());
llvm::sort(AllNames);
for (StringRef Name : AllNames) {
const size_t SlashPos = Name.find('/');
if (SlashPos != std::string::npos) {

View File

@ -853,24 +853,21 @@ void ShrinkWrapping::computeDomOrder() {
DominatorAnalysis<false> &DA = Info.getDominatorAnalysis();
auto &InsnToBB = Info.getInsnToBBMap();
std::sort(Order.begin(), Order.end(),
[&](const MCPhysReg &A, const MCPhysReg &B) {
BinaryBasicBlock *BBA =
BestSavePos[A] ? InsnToBB[BestSavePos[A]] : nullptr;
BinaryBasicBlock *BBB =
BestSavePos[B] ? InsnToBB[BestSavePos[B]] : nullptr;
if (BBA == BBB)
return A < B;
if (!BBA && BBB)
return false;
if (BBA && !BBB)
return true;
if (DA.doesADominateB(*BestSavePos[A], *BestSavePos[B]))
return true;
if (DA.doesADominateB(*BestSavePos[B], *BestSavePos[A]))
return false;
return A < B;
});
llvm::sort(Order, [&](const MCPhysReg &A, const MCPhysReg &B) {
BinaryBasicBlock *BBA = BestSavePos[A] ? InsnToBB[BestSavePos[A]] : nullptr;
BinaryBasicBlock *BBB = BestSavePos[B] ? InsnToBB[BestSavePos[B]] : nullptr;
if (BBA == BBB)
return A < B;
if (!BBA && BBB)
return false;
if (BBA && !BBB)
return true;
if (DA.doesADominateB(*BestSavePos[A], *BestSavePos[B]))
return true;
if (DA.doesADominateB(*BestSavePos[B], *BestSavePos[A]))
return false;
return A < B;
});
for (MCPhysReg I = 0, E = BC.MRI->getNumRegs(); I != E; ++I)
DomOrder[Order[I]] = I;
@ -1821,21 +1818,17 @@ BBIterTy ShrinkWrapping::processInsertionsList(
}
// Reorder POPs to obey the correct dominance relation between them
std::stable_sort(TodoList.begin(), TodoList.end(),
[&](const WorklistItem &A, const WorklistItem &B) {
if ((A.Action != WorklistItem::InsertPushOrPop ||
!A.FIEToInsert.IsLoad) &&
(B.Action != WorklistItem::InsertPushOrPop ||
!B.FIEToInsert.IsLoad))
return false;
if ((A.Action != WorklistItem::InsertPushOrPop ||
!A.FIEToInsert.IsLoad))
return true;
if ((B.Action != WorklistItem::InsertPushOrPop ||
!B.FIEToInsert.IsLoad))
return false;
return DomOrder[B.AffectedReg] < DomOrder[A.AffectedReg];
});
llvm::stable_sort(TodoList, [&](const WorklistItem &A,
const WorklistItem &B) {
if ((A.Action != WorklistItem::InsertPushOrPop || !A.FIEToInsert.IsLoad) &&
(B.Action != WorklistItem::InsertPushOrPop || !B.FIEToInsert.IsLoad))
return false;
if ((A.Action != WorklistItem::InsertPushOrPop || !A.FIEToInsert.IsLoad))
return true;
if ((B.Action != WorklistItem::InsertPushOrPop || !B.FIEToInsert.IsLoad))
return false;
return DomOrder[B.AffectedReg] < DomOrder[A.AffectedReg];
});
// Process insertions
for (WorklistItem &Item : TodoList) {

View File

@ -188,10 +188,10 @@ void SplitFunctions::splitFunction(BinaryFunction &BF) {
// All blocks with 0 count that we can move go to the end of the function.
// Even if they were natural to cluster formation and were seen in-between
// hot basic blocks.
std::stable_sort(BF.layout_begin(), BF.layout_end(),
[&](BinaryBasicBlock *A, BinaryBasicBlock *B) {
return A->canOutline() < B->canOutline();
});
llvm::stable_sort(BF.layout(),
[&](BinaryBasicBlock *A, BinaryBasicBlock *B) {
return A->canOutline() < B->canOutline();
});
} else if (BF.hasEHRanges() && !opts::SplitEH) {
// Typically functions with exception handling have landing pads at the end.
// We cannot move beginning of landing pads, but we can move 0-count blocks

View File

@ -101,12 +101,10 @@ void ThreeWayBranch::runOnFunction(BinaryFunction &Function) {
Blocks.push_back(std::make_pair(SecondEndpoint, SecondCC));
Blocks.push_back(std::make_pair(ThirdEndpoint, ThirdCC));
std::sort(Blocks.begin(), Blocks.end(),
[&](const std::pair<BinaryBasicBlock *, unsigned> A,
const std::pair<BinaryBasicBlock *, unsigned> B) {
return A.first->getExecutionCount() <
B.first->getExecutionCount();
});
llvm::sort(Blocks, [&](const std::pair<BinaryBasicBlock *, unsigned> A,
const std::pair<BinaryBasicBlock *, unsigned> B) {
return A.first->getExecutionCount() < B.first->getExecutionCount();
});
uint64_t NewSecondBranchCount = Blocks[1].first->getExecutionCount() +
Blocks[0].first->getExecutionCount();

View File

@ -114,10 +114,10 @@ std::vector<SectionNameAndRange> getTextSections(const BinaryContext *BC) {
sections.push_back(
{Section.getName(), Section.getAddress(), Section.getEndAddress()});
}
std::sort(sections.begin(), sections.end(),
[](const SectionNameAndRange &A, const SectionNameAndRange &B) {
return A.BeginAddress < B.BeginAddress;
});
llvm::sort(sections,
[](const SectionNameAndRange &A, const SectionNameAndRange &B) {
return A.BeginAddress < B.BeginAddress;
});
return sections;
}
}

View File

@ -95,7 +95,7 @@ void FuncBranchData::appendFrom(const FuncBranchData &FBD, uint64_t Offset) {
I->To.Offset += Offset;
}
}
std::stable_sort(Data.begin(), Data.end());
llvm::stable_sort(Data);
ExecutionCount += FBD.ExecutionCount;
for (auto I = FBD.EntryData.begin(), E = FBD.EntryData.end(); I != E; ++I) {
assert(I->To.Name == FBD.Name);
@ -123,7 +123,7 @@ void SampleInfo::print(raw_ostream &OS) const {
}
uint64_t FuncSampleData::getSamples(uint64_t Start, uint64_t End) const {
assert(std::is_sorted(Data.begin(), Data.end()));
assert(llvm::is_sorted(Data));
struct Compare {
bool operator()(const SampleInfo &SI, const uint64_t Val) const {
return SI.Loc.Offset < Val;
@ -133,8 +133,8 @@ uint64_t FuncSampleData::getSamples(uint64_t Start, uint64_t End) const {
}
};
uint64_t Result = 0;
for (auto I = std::lower_bound(Data.begin(), Data.end(), Start, Compare()),
E = std::lower_bound(Data.begin(), Data.end(), End, Compare());
for (auto I = llvm::lower_bound(Data, Start, Compare()),
E = llvm::lower_bound(Data, End, Compare());
I != E; ++I)
Result += I->Hits;
return Result;
@ -1146,12 +1146,10 @@ std::error_code DataReader::parseInNoLBRMode() {
}
for (StringMapEntry<FuncSampleData> &FuncSamples : NamesToSamples)
std::stable_sort(FuncSamples.second.Data.begin(),
FuncSamples.second.Data.end());
llvm::stable_sort(FuncSamples.second.Data);
for (StringMapEntry<FuncMemData> &MemEvents : NamesToMemEvents)
std::stable_sort(MemEvents.second.Data.begin(),
MemEvents.second.Data.end());
llvm::stable_sort(MemEvents.second.Data);
return std::error_code();
}
@ -1247,12 +1245,10 @@ std::error_code DataReader::parse() {
}
for (StringMapEntry<FuncBranchData> &FuncBranches : NamesToBranches)
std::stable_sort(FuncBranches.second.Data.begin(),
FuncBranches.second.Data.end());
llvm::stable_sort(FuncBranches.second.Data);
for (StringMapEntry<FuncMemData> &MemEvents : NamesToMemEvents)
std::stable_sort(MemEvents.second.Data.begin(),
MemEvents.second.Data.end());
llvm::stable_sort(MemEvents.second.Data);
return std::error_code();
}

View File

@ -233,7 +233,7 @@ void Heatmap::printCDF(raw_ostream &OS) const {
NumTotalCounts += KV.second;
}
std::sort(Counts.begin(), Counts.end(), std::greater<uint64_t>());
llvm::sort(Counts, std::greater<uint64_t>());
double RatioLeftInKB = (1.0 * BucketSize) / 1024;
assert(NumTotalCounts > 0 &&

View File

@ -106,7 +106,7 @@ void convert(const BinaryFunction &BF,
}
}
std::sort(YamlBB.CallSites.begin(), YamlBB.CallSites.end());
llvm::sort(YamlBB.CallSites);
// Skip printing if there's no profile data for non-entry basic block.
// Include landing pads with non-zero execution count.

View File

@ -302,10 +302,10 @@ class RewriteInstanceDiff {
continue;
Unmapped.emplace_back(&Function);
}
std::sort(Unmapped.begin(), Unmapped.end(),
[&](const BinaryFunction *A, const BinaryFunction *B) {
return A->getFunctionScore() > B->getFunctionScore();
});
llvm::sort(Unmapped,
[&](const BinaryFunction *A, const BinaryFunction *B) {
return A->getFunctionScore() > B->getFunctionScore();
});
for (const BinaryFunction *Function : Unmapped) {
outs() << Function->getPrintName() << " : ";
outs() << Function->getFunctionScore() << "\n";

View File

@ -1243,10 +1243,9 @@ static std::string extractDWOTUFromDWP(
// Sorting so it's easy to compare output.
// They should be sharing the same Abbrev.
std::sort(TUContributions.begin(), TUContributions.end(),
[](const TUEntry &V1, const TUEntry &V2) -> bool {
return V1.second->Offset < V2.second->Offset;
});
llvm::sort(TUContributions, [](const TUEntry &V1, const TUEntry &V2) -> bool {
return V1.second->Offset < V2.second->Offset;
});
for (auto &PairEntry : TUContributions) {
const DWARFUnitIndex::Entry::SectionContribution *C = PairEntry.second;
@ -1289,11 +1288,11 @@ static void extractTypesFromDWPDWARF5(
}
// Sorting so it's easy to compare output.
// They should be sharing the same Abbrev.
std::sort(TUContributions.begin(), TUContributions.end(),
[](const DWARFUnitIndex::Entry::SectionContribution *V1,
const DWARFUnitIndex::Entry::SectionContribution *V2) -> bool {
return V1->Offset < V2->Offset;
});
llvm::sort(TUContributions,
[](const DWARFUnitIndex::Entry::SectionContribution *V1,
const DWARFUnitIndex::Entry::SectionContribution *V2) -> bool {
return V1->Offset < V2->Offset;
});
Streamer.switchSection(MCOFI.getDwarfInfoDWOSection());
for (const auto *C : TUContributions)
Streamer.emitBytes(Contents.slice(C->Offset, C->Offset + C->Length));

View File

@ -191,10 +191,9 @@ std::vector<DataInCodeRegion> readDataInCode(const MachOObjectFile &O) {
DataInCode.reserve(NumberOfEntries);
for (auto I = O.begin_dices(), E = O.end_dices(); I != E; ++I)
DataInCode.emplace_back(*I);
std::stable_sort(DataInCode.begin(), DataInCode.end(),
[](DataInCodeRegion LHS, DataInCodeRegion RHS) {
return LHS.Offset < RHS.Offset;
});
llvm::stable_sort(DataInCode, [](DataInCodeRegion LHS, DataInCodeRegion RHS) {
return LHS.Offset < RHS.Offset;
});
return DataInCode;
}
@ -244,10 +243,10 @@ void MachORewriteInstance::discoverFileObjects() {
}
if (FunctionSymbols.empty())
return;
std::stable_sort(FunctionSymbols.begin(), FunctionSymbols.end(),
[](const SymbolRef &LHS, const SymbolRef &RHS) {
return cantFail(LHS.getValue()) < cantFail(RHS.getValue());
});
llvm::stable_sort(
FunctionSymbols, [](const SymbolRef &LHS, const SymbolRef &RHS) {
return cantFail(LHS.getValue()) < cantFail(RHS.getValue());
});
for (size_t Index = 0; Index < FunctionSymbols.size(); ++Index) {
const uint64_t Address = cantFail(FunctionSymbols[Index].getValue());
ErrorOr<BinarySection &> Section = BC->getSectionForAddress(Address);

View File

@ -308,10 +308,9 @@ namespace {
bool refersToReorderedSection(ErrorOr<BinarySection &> Section) {
auto Itr =
std::find_if(opts::ReorderData.begin(), opts::ReorderData.end(),
[&](const std::string &SectionName) {
return (Section && Section->getName() == SectionName);
});
llvm::find_if(opts::ReorderData, [&](const std::string &SectionName) {
return (Section && Section->getName() == SectionName);
});
return Itr != opts::ReorderData.end();
}
@ -839,8 +838,8 @@ void RewriteInstance::discoverFileObjects() {
return Section.isAllocatable();
};
std::vector<SymbolRef> SortedFileSymbols;
std::copy_if(InputFile->symbol_begin(), InputFile->symbol_end(),
std::back_inserter(SortedFileSymbols), isSymbolInMemory);
llvm::copy_if(InputFile->symbols(), std::back_inserter(SortedFileSymbols),
isSymbolInMemory);
auto CompareSymbols = [this](const SymbolRef &A, const SymbolRef &B) {
// Marker symbols have the highest precedence, while
// SECTIONs have the lowest.
@ -865,8 +864,7 @@ void RewriteInstance::discoverFileObjects() {
return false;
};
std::stable_sort(SortedFileSymbols.begin(), SortedFileSymbols.end(),
CompareSymbols);
llvm::stable_sort(SortedFileSymbols, CompareSymbols);
auto LastSymbol = SortedFileSymbols.end() - 1;
@ -2702,11 +2700,10 @@ void RewriteInstance::selectFunctionsToProcess() {
if (ProfileReader->mayHaveProfileData(Function))
TopFunctions.push_back(&Function);
}
std::sort(TopFunctions.begin(), TopFunctions.end(),
[](const BinaryFunction *A, const BinaryFunction *B) {
return
A->getKnownExecutionCount() < B->getKnownExecutionCount();
});
llvm::sort(
TopFunctions, [](const BinaryFunction *A, const BinaryFunction *B) {
return A->getKnownExecutionCount() < B->getKnownExecutionCount();
});
size_t Index = TopFunctions.size() * opts::LiteThresholdPct / 100;
if (Index)
@ -3295,7 +3292,7 @@ void RewriteInstance::updatePseudoProbes() {
std::vector<uint64_t> Addresses;
for (auto &Entry : Address2ProbesMap)
Addresses.push_back(Entry.first);
std::sort(Addresses.begin(), Addresses.end());
llvm::sort(Addresses);
for (uint64_t Key : Addresses) {
for (MCDecodedPseudoProbe &Probe : Address2ProbesMap[Key]) {
if (Probe.getAddress() == INT64_MAX)
@ -3569,7 +3566,7 @@ std::vector<BinarySection *> RewriteInstance::getCodeSections() {
};
// Determine the order of sections.
std::stable_sort(CodeSections.begin(), CodeSections.end(), compareSections);
llvm::stable_sort(CodeSections, compareSections);
return CodeSections;
}
@ -3601,12 +3598,9 @@ void RewriteInstance::mapCodeSections(RuntimeDyld &RTDyld) {
std::vector<BinarySection *> CodeSections = getCodeSections();
// Remove sections that were pre-allocated (patch sections).
CodeSections.erase(
std::remove_if(CodeSections.begin(), CodeSections.end(),
[](BinarySection *Section) {
return Section->getOutputAddress();
}),
CodeSections.end());
llvm::erase_if(CodeSections, [](BinarySection *Section) {
return Section->getOutputAddress();
});
LLVM_DEBUG(dbgs() << "Code sections in the order of output:\n";
for (const BinarySection *Section : CodeSections)
dbgs() << Section->getName() << '\n';
@ -4263,11 +4257,11 @@ RewriteInstance::getOutputSections(ELFObjectFile<ELFT> *File,
}
// Sort all allocatable sections by their offset.
std::stable_sort(OutputSections.begin(), OutputSections.end(),
[] (const std::pair<std::string, ELFShdrTy> &A,
const std::pair<std::string, ELFShdrTy> &B) {
return A.second.sh_offset < B.second.sh_offset;
});
llvm::stable_sort(OutputSections,
[](const std::pair<std::string, ELFShdrTy> &A,
const std::pair<std::string, ELFShdrTy> &B) {
return A.second.sh_offset < B.second.sh_offset;
});
// Fix section sizes to prevent overlapping.
ELFShdrTy *PrevSection = nullptr;
@ -4376,11 +4370,10 @@ RewriteInstance::getOutputSections(ELFObjectFile<ELFT> *File,
}
std::vector<ELFShdrTy> SectionsOnly(OutputSections.size());
std::transform(OutputSections.begin(), OutputSections.end(),
SectionsOnly.begin(),
[](std::pair<std::string, ELFShdrTy> &SectionInfo) {
return SectionInfo.second;
});
llvm::transform(OutputSections, SectionsOnly.begin(),
[](std::pair<std::string, ELFShdrTy> &SectionInfo) {
return SectionInfo.second;
});
return SectionsOnly;
}
@ -4777,13 +4770,11 @@ void RewriteInstance::updateELFSymbolTable(
}
// Put local symbols at the beginning.
std::stable_sort(Symbols.begin(), Symbols.end(),
[](const ELFSymTy &A, const ELFSymTy &B) {
if (A.getBinding() == ELF::STB_LOCAL &&
B.getBinding() != ELF::STB_LOCAL)
return true;
return false;
});
llvm::stable_sort(Symbols, [](const ELFSymTy &A, const ELFSymTy &B) {
if (A.getBinding() == ELF::STB_LOCAL && B.getBinding() != ELF::STB_LOCAL)
return true;
return false;
});
for (const ELFSymTy &Symbol : Symbols)
Write(0, Symbol);

View File

@ -243,13 +243,12 @@ std::string InstrumentationRuntimeLibrary::buildTables(BinaryContext &BC) {
};
// Indirect targets need to be sorted for fast lookup during runtime
std::sort(Summary->IndCallTargetDescriptions.begin(),
Summary->IndCallTargetDescriptions.end(),
[&](const IndCallTargetDescription &A,
const IndCallTargetDescription &B) {
return getOutputAddress(*A.Target, A.ToLoc.Offset) <
getOutputAddress(*B.Target, B.ToLoc.Offset);
});
llvm::sort(Summary->IndCallTargetDescriptions,
[&](const IndCallTargetDescription &A,
const IndCallTargetDescription &B) {
return getOutputAddress(*A.Target, A.ToLoc.Offset) <
getOutputAddress(*B.Target, B.ToLoc.Offset);
});
// Start of the vector with descriptions (one CounterDescription for each
// counter), vector size is Counters.size() CounterDescription-sized elmts

View File

@ -398,14 +398,14 @@ int main(int argc, char **argv) {
BinaryProfile MergedProfile;
MergedProfile.Header = MergedHeader;
MergedProfile.Functions.resize(MergedBFs.size());
std::transform(
MergedBFs.begin(), MergedBFs.end(), MergedProfile.Functions.begin(),
llvm::transform(
MergedBFs, MergedProfile.Functions.begin(),
[](StringMapEntry<BinaryFunctionProfile> &V) { return V.second; });
// For consistency, sort functions by their IDs.
std::sort(MergedProfile.Functions.begin(), MergedProfile.Functions.end(),
[](const BinaryFunctionProfile &A,
const BinaryFunctionProfile &B) { return A.Id < B.Id; });
llvm::sort(MergedProfile.Functions,
[](const BinaryFunctionProfile &A,
const BinaryFunctionProfile &B) { return A.Id < B.Id; });
YamlOut << MergedProfile;
}
@ -435,9 +435,8 @@ int main(int argc, char **argv) {
CountFuncType CountFunc = (opts::PrintFunctionList == opts::ST_EXEC_COUNT)
? ExecCountFunc
: BranchCountFunc;
std::transform(MergedBFs.begin(), MergedBFs.end(), FunctionList.begin(),
CountFunc);
std::stable_sort(FunctionList.rbegin(), FunctionList.rend());
llvm::transform(MergedBFs, FunctionList.begin(), CountFunc);
llvm::stable_sort(reverse(FunctionList));
errs() << "Functions sorted by "
<< (opts::PrintFunctionList == opts::ST_EXEC_COUNT ? "execution"
: "total branch")