//=-- InstrProf.cpp - Instrumented profiling format support -----------------=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains support for clang's instrumentation based PGO and
// coverage.
//
//===----------------------------------------------------------------------===//

#include "llvm/ProfileData/InstrProf.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Compression.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/ManagedStatic.h"

using namespace llvm;

namespace {
class InstrProfErrorCategoryType : public std::error_category {
  const char *name() const LLVM_NOEXCEPT override { return "llvm.instrprof"; }
  std::string message(int IE) const override {
    instrprof_error E = static_cast<instrprof_error>(IE);
    switch (E) {
    case instrprof_error::success:
      return "Success";
    case instrprof_error::eof:
      return "End of File";
    case instrprof_error::unrecognized_format:
      return "Unrecognized instrumentation profile encoding format";
    case instrprof_error::bad_magic:
      return "Invalid instrumentation profile data (bad magic)";
    case instrprof_error::bad_header:
      return "Invalid instrumentation profile data (file header is corrupt)";
    case instrprof_error::unsupported_version:
      return "Unsupported instrumentation profile format version";
    case instrprof_error::unsupported_hash_type:
      return "Unsupported instrumentation profile hash type";
    case instrprof_error::too_large:
      return "Too much profile data";
    case instrprof_error::truncated:
      return "Truncated profile data";
    case instrprof_error::malformed:
      return "Malformed instrumentation profile data";
    case instrprof_error::unknown_function:
      return "No profile data available for function";
    case instrprof_error::hash_mismatch:
      return "Function control flow change detected (hash mismatch)";
    case instrprof_error::count_mismatch:
      return "Function basic block count change detected (counter mismatch)";
    case instrprof_error::counter_overflow:
      return "Counter overflow";
    case instrprof_error::value_site_count_mismatch:
      return "Function value site count change detected (counter mismatch)";
    }
    llvm_unreachable("A value of instrprof_error has no message.");
  }
};
} // end anonymous namespace

static ManagedStatic<InstrProfErrorCategoryType> ErrorCategory;

const std::error_category &llvm::instrprof_category() {
  return *ErrorCategory;
}

namespace llvm {

std::string getPGOFuncName(StringRef RawFuncName,
                           GlobalValue::LinkageTypes Linkage,
                           StringRef FileName,
                           uint64_t Version LLVM_ATTRIBUTE_UNUSED) {

  // Function names may be prefixed with a binary '1' to indicate
  // that the backend should not modify the symbols due to any platform
  // naming convention. Do not include that '1' in the PGO profile name.
  if (RawFuncName[0] == '\1')
    RawFuncName = RawFuncName.substr(1);

  std::string FuncName = RawFuncName;
  if (llvm::GlobalValue::isLocalLinkage(Linkage)) {
    // For local symbols, prepend the main file name to distinguish them.
    // Do not include the full path in the file name since there's no guarantee
    // that it will stay the same, e.g., if the files are checked out from
    // version control in different locations.
    if (FileName.empty())
      FuncName = FuncName.insert(0, "<unknown>:");
    else
      FuncName = FuncName.insert(0, FileName.str() + ":");
  }
  return FuncName;
}

std::string getPGOFuncName(const Function &F, uint64_t Version) {
  return getPGOFuncName(F.getName(), F.getLinkage(), F.getParent()->getName(),
                        Version);
}

StringRef getFuncNameWithoutPrefix(StringRef PGOFuncName, StringRef FileName) {
  if (FileName.empty())
    return PGOFuncName;
  // Drop the file name including ':'. See also getPGOFuncName.
  if (PGOFuncName.startswith(FileName))
    PGOFuncName = PGOFuncName.drop_front(FileName.size() + 1);
  return PGOFuncName;
}

// \p FuncName is the string used as profile lookup key for the function. A
// symbol is created to hold the name. Return the legalized symbol name.
static std::string getPGOFuncNameVarName(StringRef FuncName,
                                         GlobalValue::LinkageTypes Linkage) {
  std::string VarName = getInstrProfNameVarPrefix();
  VarName += FuncName;

  if (!GlobalValue::isLocalLinkage(Linkage))
    return VarName;

  // Now fix up illegal chars in local VarName that may upset the assembler.
  const char *InvalidChars = "-:<>\"'";
  size_t found = VarName.find_first_of(InvalidChars);
  while (found != std::string::npos) {
    VarName[found] = '_';
    found = VarName.find_first_of(InvalidChars, found + 1);
  }
  return VarName;
}

GlobalVariable *createPGOFuncNameVar(Module &M,
                                     GlobalValue::LinkageTypes Linkage,
                                     StringRef FuncName) {

  // We generally want to match the function's linkage, but available_externally
  // and extern_weak both have the wrong semantics, and anything that doesn't
  // need to link across compilation units doesn't need to be visible at all.
  if (Linkage == GlobalValue::ExternalWeakLinkage)
    Linkage = GlobalValue::LinkOnceAnyLinkage;
  else if (Linkage == GlobalValue::AvailableExternallyLinkage)
    Linkage = GlobalValue::LinkOnceODRLinkage;
  else if (Linkage == GlobalValue::InternalLinkage ||
           Linkage == GlobalValue::ExternalLinkage)
    Linkage = GlobalValue::PrivateLinkage;

  auto *Value = ConstantDataArray::getString(M.getContext(), FuncName, false);
  auto FuncNameVar =
      new GlobalVariable(M, Value->getType(), true, Linkage, Value,
                         getPGOFuncNameVarName(FuncName, Linkage));

  // Hide the symbol so that we correctly get a copy for each executable.
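  // Hidden visibility keeps the name variable out of the dynamic symbol
  // table, so references bind locally and each linked image keeps its own
  // copy of the name data instead of resolving to a single exported symbol.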
  if (!GlobalValue::isLocalLinkage(FuncNameVar->getLinkage()))
    FuncNameVar->setVisibility(GlobalValue::HiddenVisibility);

  return FuncNameVar;
}

GlobalVariable *createPGOFuncNameVar(Function &F, StringRef FuncName) {
  return createPGOFuncNameVar(*F.getParent(), F.getLinkage(), FuncName);
}

void InstrProfSymtab::create(const Module &M) {
  for (const Function &F : M)
    addFuncName(getPGOFuncName(F));

  finalizeSymtab();
}

int collectPGOFuncNameStrings(const std::vector<std::string> &NameStrs,
                              bool doCompression, std::string &Result) {
  uint8_t Header[16], *P = Header;
  std::string UncompressedNameStrings =
      join(NameStrs.begin(), NameStrs.end(), StringRef(" "));

  unsigned EncLen = encodeULEB128(UncompressedNameStrings.length(), P);
  P += EncLen;

  auto WriteStringToResult = [&](size_t CompressedLen,
                                 const std::string &InputStr) {
    EncLen = encodeULEB128(CompressedLen, P);
    P += EncLen;
    char *HeaderStr = reinterpret_cast<char *>(&Header[0]);
    unsigned HeaderLen = P - &Header[0];
    Result.append(HeaderStr, HeaderLen);
    Result += InputStr;
    return 0;
  };

  if (!doCompression)
    return WriteStringToResult(0, UncompressedNameStrings);

  SmallVector<char, 128> CompressedNameStrings;
  zlib::Status Success =
      zlib::compress(StringRef(UncompressedNameStrings), CompressedNameStrings,
                     zlib::BestSizeCompression);

  if (Success != zlib::StatusOK)
    return 1;

  return WriteStringToResult(
      CompressedNameStrings.size(),
      std::string(CompressedNameStrings.data(), CompressedNameStrings.size()));
}

StringRef getPGOFuncNameInitializer(GlobalVariable *NameVar) {
  auto *Arr = cast<ConstantDataArray>(NameVar->getInitializer());
  StringRef NameStr =
      Arr->isCString() ? Arr->getAsCString() : Arr->getAsString();
  return NameStr;
}

int collectPGOFuncNameStrings(const std::vector<GlobalVariable *> &NameVars,
                              std::string &Result, bool doCompression) {
  std::vector<std::string> NameStrs;
  for (auto *NameVar : NameVars) {
    NameStrs.push_back(getPGOFuncNameInitializer(NameVar));
  }
  return collectPGOFuncNameStrings(
      NameStrs, zlib::isAvailable() && doCompression, Result);
}

int readPGOFuncNameStrings(StringRef NameStrings, InstrProfSymtab &Symtab) {
  const uint8_t *P = reinterpret_cast<const uint8_t *>(NameStrings.data());
  const uint8_t *EndP = reinterpret_cast<const uint8_t *>(NameStrings.data() +
                                                          NameStrings.size());
  while (P < EndP) {
    uint32_t N;
    uint64_t UncompressedSize = decodeULEB128(P, &N);
    P += N;
    uint64_t CompressedSize = decodeULEB128(P, &N);
    P += N;
    bool isCompressed = (CompressedSize != 0);
    SmallString<128> UncompressedNameStrings;
    StringRef NameStrings;
    if (isCompressed) {
      StringRef CompressedNameStrings(reinterpret_cast<const char *>(P),
                                      CompressedSize);
      if (zlib::uncompress(CompressedNameStrings, UncompressedNameStrings,
                           UncompressedSize) != zlib::StatusOK)
        return 1;
      P += CompressedSize;
      NameStrings = StringRef(UncompressedNameStrings.data(),
                              UncompressedNameStrings.size());
    } else {
      NameStrings =
          StringRef(reinterpret_cast<const char *>(P), UncompressedSize);
      P += UncompressedSize;
    }
    // Now parse the name strings.
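    // Individual names within each (possibly decompressed) block are
    // separated by a single space; see collectPGOFuncNameStrings above.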
    SmallVector<StringRef, 0> Names;
    NameStrings.split(Names, ' ');
    for (StringRef &Name : Names)
      Symtab.addFuncName(Name);

    while (P < EndP && *P == 0)
      P++;
  }
  Symtab.finalizeSymtab();
  return 0;
}

instrprof_error InstrProfValueSiteRecord::merge(InstrProfValueSiteRecord &Input,
                                                uint64_t Weight) {
  this->sortByTargetValues();
  Input.sortByTargetValues();
  auto I = ValueData.begin();
  auto IE = ValueData.end();
  instrprof_error Result = instrprof_error::success;
  for (auto J = Input.ValueData.begin(), JE = Input.ValueData.end(); J != JE;
       ++J) {
    while (I != IE && I->Value < J->Value)
      ++I;
    if (I != IE && I->Value == J->Value) {
      bool Overflowed;
      I->Count = SaturatingMultiplyAdd(J->Count, Weight, I->Count, &Overflowed);
      if (Overflowed)
        Result = instrprof_error::counter_overflow;
      ++I;
      continue;
    }
    ValueData.insert(I, *J);
  }
  return Result;
}

instrprof_error InstrProfValueSiteRecord::scale(uint64_t Weight) {
  instrprof_error Result = instrprof_error::success;
  for (auto I = ValueData.begin(), IE = ValueData.end(); I != IE; ++I) {
    bool Overflowed;
    I->Count = SaturatingMultiply(I->Count, Weight, &Overflowed);
    if (Overflowed)
      Result = instrprof_error::counter_overflow;
  }
  return Result;
}

// Merge Value Profile data from Src record to this record for ValueKind.
// Scale merged value counts by \p Weight.
instrprof_error InstrProfRecord::mergeValueProfData(uint32_t ValueKind,
                                                    InstrProfRecord &Src,
                                                    uint64_t Weight) {
  uint32_t ThisNumValueSites = getNumValueSites(ValueKind);
  uint32_t OtherNumValueSites = Src.getNumValueSites(ValueKind);
  if (ThisNumValueSites != OtherNumValueSites)
    return instrprof_error::value_site_count_mismatch;
  std::vector<InstrProfValueSiteRecord> &ThisSiteRecords =
      getValueSitesForKind(ValueKind);
  std::vector<InstrProfValueSiteRecord> &OtherSiteRecords =
      Src.getValueSitesForKind(ValueKind);
  instrprof_error Result = instrprof_error::success;
  for (uint32_t I = 0; I < ThisNumValueSites; I++)
    MergeResult(Result, ThisSiteRecords[I].merge(OtherSiteRecords[I], Weight));
  return Result;
}

instrprof_error InstrProfRecord::merge(InstrProfRecord &Other,
                                       uint64_t Weight) {
  // If the number of counters doesn't match we either have bad data
  // or a hash collision.
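  // Either way the counters cannot be combined meaningfully, so reject the
  // merge.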
  if (Counts.size() != Other.Counts.size())
    return instrprof_error::count_mismatch;

  instrprof_error Result = instrprof_error::success;

  for (size_t I = 0, E = Other.Counts.size(); I < E; ++I) {
    bool Overflowed;
    Counts[I] =
        SaturatingMultiplyAdd(Other.Counts[I], Weight, Counts[I], &Overflowed);
    if (Overflowed)
      Result = instrprof_error::counter_overflow;
  }

  for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
    MergeResult(Result, mergeValueProfData(Kind, Other, Weight));

  return Result;
}

instrprof_error InstrProfRecord::scaleValueProfData(uint32_t ValueKind,
                                                    uint64_t Weight) {
  uint32_t ThisNumValueSites = getNumValueSites(ValueKind);
  std::vector<InstrProfValueSiteRecord> &ThisSiteRecords =
      getValueSitesForKind(ValueKind);
  instrprof_error Result = instrprof_error::success;
  for (uint32_t I = 0; I < ThisNumValueSites; I++)
    MergeResult(Result, ThisSiteRecords[I].scale(Weight));
  return Result;
}

instrprof_error InstrProfRecord::scale(uint64_t Weight) {
  instrprof_error Result = instrprof_error::success;
  for (auto &Count : this->Counts) {
    bool Overflowed;
    Count = SaturatingMultiply(Count, Weight, &Overflowed);
    if (Overflowed && Result == instrprof_error::success) {
      Result = instrprof_error::counter_overflow;
    }
  }
  for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
    MergeResult(Result, scaleValueProfData(Kind, Weight));

  return Result;
}

// Map indirect call target name hash to name string.
uint64_t InstrProfRecord::remapValue(uint64_t Value, uint32_t ValueKind,
                                     ValueMapType *ValueMap) {
  if (!ValueMap)
    return Value;
  switch (ValueKind) {
  case IPVK_IndirectCallTarget: {
    auto Result =
        std::lower_bound(ValueMap->begin(), ValueMap->end(), Value,
                         [](const std::pair<uint64_t, uint64_t> &LHS,
                            uint64_t RHS) { return LHS.first < RHS; });
    if (Result != ValueMap->end())
      Value = (uint64_t)Result->second;
    break;
  }
  }
  return Value;
}

void InstrProfRecord::addValueData(uint32_t ValueKind, uint32_t Site,
                                   InstrProfValueData *VData, uint32_t N,
                                   ValueMapType *ValueMap) {
  for (uint32_t I = 0; I < N; I++) {
    VData[I].Value = remapValue(VData[I].Value, ValueKind, ValueMap);
  }
  std::vector<InstrProfValueSiteRecord> &ValueSites =
      getValueSitesForKind(ValueKind);
  if (N == 0)
    ValueSites.push_back(InstrProfValueSiteRecord());
  else
    ValueSites.emplace_back(VData, VData + N);
}

#define INSTR_PROF_COMMON_API_IMPL
#include "llvm/ProfileData/InstrProfData.inc"

/*!
 * \brief ValueProfRecordClosure Interface implementation for InstrProfRecord
 * class. These C wrappers are used as adaptors so that C++ code can be
 * invoked as callbacks.
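 * The ValueProfRecordClosure type and the generic routines that consume it
 * (getValueProfDataSize, serializeValueProfDataFrom) are provided by
 * InstrProfData.inc, included above with INSTR_PROF_COMMON_API_IMPL defined.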
 */
uint32_t getNumValueKindsInstrProf(const void *Record) {
  return reinterpret_cast<const InstrProfRecord *>(Record)->getNumValueKinds();
}

uint32_t getNumValueSitesInstrProf(const void *Record, uint32_t VKind) {
  return reinterpret_cast<const InstrProfRecord *>(Record)
      ->getNumValueSites(VKind);
}

uint32_t getNumValueDataInstrProf(const void *Record, uint32_t VKind) {
  return reinterpret_cast<const InstrProfRecord *>(Record)
      ->getNumValueData(VKind);
}

uint32_t getNumValueDataForSiteInstrProf(const void *R, uint32_t VK,
                                         uint32_t S) {
  return reinterpret_cast<const InstrProfRecord *>(R)
      ->getNumValueDataForSite(VK, S);
}

void getValueForSiteInstrProf(const void *R, InstrProfValueData *Dst,
                              uint32_t K, uint32_t S) {
  reinterpret_cast<const InstrProfRecord *>(R)->getValueForSite(Dst, K, S);
}

ValueProfData *allocValueProfDataInstrProf(size_t TotalSizeInBytes) {
  ValueProfData *VD =
      (ValueProfData *)(new (::operator new(TotalSizeInBytes)) ValueProfData());
  memset(VD, 0, TotalSizeInBytes);
  return VD;
}

static ValueProfRecordClosure InstrProfRecordClosure = {
    nullptr,
    getNumValueKindsInstrProf,
    getNumValueSitesInstrProf,
    getNumValueDataInstrProf,
    getNumValueDataForSiteInstrProf,
    nullptr,
    getValueForSiteInstrProf,
    allocValueProfDataInstrProf};

// Wrapper implementation using the closure mechanism.
uint32_t ValueProfData::getSize(const InstrProfRecord &Record) {
  InstrProfRecordClosure.Record = &Record;
  return getValueProfDataSize(&InstrProfRecordClosure);
}

// Wrapper implementation using the closure mechanism.
std::unique_ptr<ValueProfData>
ValueProfData::serializeFrom(const InstrProfRecord &Record) {
  InstrProfRecordClosure.Record = &Record;

  std::unique_ptr<ValueProfData> VPD(
      serializeValueProfDataFrom(&InstrProfRecordClosure, nullptr));
  return VPD;
}

void ValueProfRecord::deserializeTo(InstrProfRecord &Record,
                                    InstrProfRecord::ValueMapType *VMap) {
  Record.reserveSites(Kind, NumValueSites);

  InstrProfValueData *ValueData = getValueProfRecordValueData(this);
  for (uint64_t VSite = 0; VSite < NumValueSites; ++VSite) {
    uint8_t ValueDataCount = this->SiteCountArray[VSite];
    Record.addValueData(Kind, VSite, ValueData, ValueDataCount, VMap);
    ValueData += ValueDataCount;
  }
}

// For writing/serializing, Old is the host endianness, and New is
// the byte order intended on disk. For reading/deserialization, Old
// is the on-disk source endianness, and New is the host endianness.
void ValueProfRecord::swapBytes(support::endianness Old,
                                support::endianness New) {
  using namespace support;
  if (Old == New)
    return;

  if (getHostEndianness() != Old) {
    sys::swapByteOrder<uint32_t>(NumValueSites);
    sys::swapByteOrder<uint32_t>(Kind);
  }
  uint32_t ND = getValueProfRecordNumValueData(this);
  InstrProfValueData *VD = getValueProfRecordValueData(this);

  // No need to swap byte array: SiteCountArray.
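  // Each value-data entry holds two 64-bit fields (Value, Count); swap both.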
  for (uint32_t I = 0; I < ND; I++) {
    sys::swapByteOrder<uint64_t>(VD[I].Value);
    sys::swapByteOrder<uint64_t>(VD[I].Count);
  }
  if (getHostEndianness() == Old) {
    sys::swapByteOrder<uint32_t>(NumValueSites);
    sys::swapByteOrder<uint32_t>(Kind);
  }
}

void ValueProfData::deserializeTo(InstrProfRecord &Record,
                                  InstrProfRecord::ValueMapType *VMap) {
  if (NumValueKinds == 0)
    return;

  ValueProfRecord *VR = getFirstValueProfRecord(this);
  for (uint32_t K = 0; K < NumValueKinds; K++) {
    VR->deserializeTo(Record, VMap);
    VR = getValueProfRecordNext(VR);
  }
}

template <class T>
static T swapToHostOrder(const unsigned char *&D, support::endianness Orig) {
  using namespace support;
  if (Orig == little)
    return endian::readNext<T, little, unaligned>(D);
  else
    return endian::readNext<T, big, unaligned>(D);
}

static std::unique_ptr<ValueProfData> allocValueProfData(uint32_t TotalSize) {
  return std::unique_ptr<ValueProfData>(new (::operator new(TotalSize))
                                            ValueProfData());
}

instrprof_error ValueProfData::checkIntegrity() {
  if (NumValueKinds > IPVK_Last + 1)
    return instrprof_error::malformed;
  // Total size needs to be a multiple of quadword size.
  if (TotalSize % sizeof(uint64_t))
    return instrprof_error::malformed;

  ValueProfRecord *VR = getFirstValueProfRecord(this);
  for (uint32_t K = 0; K < this->NumValueKinds; K++) {
    if (VR->Kind > IPVK_Last)
      return instrprof_error::malformed;
    VR = getValueProfRecordNext(VR);
    if ((char *)VR - (char *)this > (ptrdiff_t)TotalSize)
      return instrprof_error::malformed;
  }
  return instrprof_error::success;
}

ErrorOr<std::unique_ptr<ValueProfData>>
ValueProfData::getValueProfData(const unsigned char *D,
                                const unsigned char *const BufferEnd,
                                support::endianness Endianness) {
  using namespace support;
  if (D + sizeof(ValueProfData) > BufferEnd)
    return instrprof_error::truncated;

  const unsigned char *Header = D;
  uint32_t TotalSize = swapToHostOrder<uint32_t>(Header, Endianness);
  if (D + TotalSize > BufferEnd)
    return instrprof_error::too_large;

  std::unique_ptr<ValueProfData> VPD = allocValueProfData(TotalSize);
  memcpy(VPD.get(), D, TotalSize);
  // Byte swap.
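  // (No-op when the on-disk endianness already matches the host.)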
  VPD->swapBytesToHost(Endianness);

  instrprof_error EC = VPD->checkIntegrity();
  if (EC != instrprof_error::success)
    return EC;

  return std::move(VPD);
}

void ValueProfData::swapBytesToHost(support::endianness Endianness) {
  using namespace support;
  if (Endianness == getHostEndianness())
    return;

  sys::swapByteOrder<uint32_t>(TotalSize);
  sys::swapByteOrder<uint32_t>(NumValueKinds);

  ValueProfRecord *VR = getFirstValueProfRecord(this);
  for (uint32_t K = 0; K < NumValueKinds; K++) {
    VR->swapBytes(Endianness, getHostEndianness());
    VR = getValueProfRecordNext(VR);
  }
}

void ValueProfData::swapBytesFromHost(support::endianness Endianness) {
  using namespace support;
  if (Endianness == getHostEndianness())
    return;

  ValueProfRecord *VR = getFirstValueProfRecord(this);
  for (uint32_t K = 0; K < NumValueKinds; K++) {
    ValueProfRecord *NVR = getValueProfRecordNext(VR);
    VR->swapBytes(getHostEndianness(), Endianness);
    VR = NVR;
  }
  sys::swapByteOrder<uint32_t>(TotalSize);
  sys::swapByteOrder<uint32_t>(NumValueKinds);
}

// Walk the sorted cutoff percentages in DetailedSummaryCutoffs and, for each
// cutoff, append a (Cutoff, MinBlockCount, NumBlocks) triplet to
// DetailedSummary.
void ProfileSummary::computeDetailedSummary() {
  if (DetailedSummaryCutoffs.empty())
    return;
  auto Iter = CountFrequencies.begin();
  auto End = CountFrequencies.end();
  std::sort(DetailedSummaryCutoffs.begin(), DetailedSummaryCutoffs.end());

  uint32_t BlocksSeen = 0;
  uint64_t CurrSum = 0, Count = 0;

  for (uint32_t Cutoff : DetailedSummaryCutoffs) {
    assert(Cutoff <= 999999);
    APInt Temp(128, TotalCount);
    APInt N(128, Cutoff);
    APInt D(128, ProfileSummary::Scale);
    Temp *= N;
    Temp = Temp.sdiv(D);
    uint64_t DesiredCount = Temp.getZExtValue();
    assert(DesiredCount <= TotalCount);
    while (CurrSum < DesiredCount && Iter != End) {
      Count = Iter->first;
      uint32_t Freq = Iter->second;
      CurrSum += (Count * Freq);
      BlocksSeen += Freq;
      Iter++;
    }
    assert(CurrSum >= DesiredCount);
    ProfileSummaryEntry PSE = {Cutoff, Count, BlocksSeen};
    DetailedSummary.push_back(PSE);
  }
}

ProfileSummary::ProfileSummary(const IndexedInstrProf::Summary &S)
    : TotalCount(S.get(IndexedInstrProf::Summary::TotalBlockCount)),
      MaxBlockCount(S.get(IndexedInstrProf::Summary::MaxBlockCount)),
      MaxInternalBlockCount(
          S.get(IndexedInstrProf::Summary::MaxInternalBlockCount)),
      MaxFunctionCount(S.get(IndexedInstrProf::Summary::MaxFunctionCount)),
      NumBlocks(S.get(IndexedInstrProf::Summary::TotalNumBlocks)),
      NumFunctions(S.get(IndexedInstrProf::Summary::TotalNumFunctions)) {
  for (unsigned I = 0; I < S.NumCutoffEntries; I++) {
    const IndexedInstrProf::Summary::Entry &Ent = S.getEntry(I);
    DetailedSummary.emplace_back((uint32_t)Ent.Cutoff, Ent.MinBlockCount,
                                 Ent.NumBlocks);
  }
}

} // end namespace llvm