//===- RawMemProfReader.cpp - Instrumented memory profiling reader --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains support for reading MemProf profiling data.
//
//===----------------------------------------------------------------------===//

#include <algorithm>
#include <cstdint>
#include <type_traits>

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/DebugInfo/DWARF/DWARFContext.h"
#include "llvm/DebugInfo/Symbolize/SymbolizableModule.h"
#include "llvm/DebugInfo/Symbolize/SymbolizableObjectFile.h"
#include "llvm/Object/Binary.h"
#include "llvm/Object/ELFObjectFile.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/ProfileData/MemProf.h"
#include "llvm/ProfileData/MemProfData.inc"
#include "llvm/ProfileData/RawMemProfReader.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Path.h"

#define DEBUG_TYPE "memprof"

namespace llvm {
namespace memprof {
namespace {

struct Summary {
  uint64_t Version;
  uint64_t TotalSizeBytes;
  uint64_t NumSegments;
  uint64_t NumMIBInfo;
  uint64_t NumStackOffsets;
};

template <class T = uint64_t> inline T alignedRead(const char *Ptr) {
  static_assert(std::is_pod<T>::value, "Not a pod type.");
  assert(reinterpret_cast<size_t>(Ptr) % sizeof(T) == 0 && "Unaligned Read");
  return *reinterpret_cast<const T *>(Ptr);
}

Summary computeSummary(const char *Start) {
  auto *H = reinterpret_cast<const Header *>(Start);

  // Check alignment while reading the number of items in each section.
  return Summary{
      H->Version,
      H->TotalSize,
      alignedRead(Start + H->SegmentOffset),
      alignedRead(Start + H->MIBOffset),
      alignedRead(Start + H->StackOffset),
  };
}

Error checkBuffer(const MemoryBuffer &Buffer) {
  if (!RawMemProfReader::hasFormat(Buffer))
    return make_error<InstrProfError>(instrprof_error::bad_magic);

  if (Buffer.getBufferSize() == 0)
    return make_error<InstrProfError>(instrprof_error::empty_raw_profile);

  if (Buffer.getBufferSize() < sizeof(Header)) {
    return make_error<InstrProfError>(instrprof_error::truncated);
  }

  // The size of the buffer can be > header total size since we allow repeated
  // serialization of memprof profiles to the same file.
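  // A raw profile file may hold several profiles laid out back to back:
  //   [Header_1 | Sections_1][Header_2 | Sections_2]...
  // Each header records the total size of its profile, so we can hop from one
  // profile to the next and require that the sizes sum to the buffer size.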
  uint64_t TotalSize = 0;
  const char *Next = Buffer.getBufferStart();
  while (Next < Buffer.getBufferEnd()) {
    auto *H = reinterpret_cast<const Header *>(Next);
    if (H->Version != MEMPROF_RAW_VERSION) {
      return make_error<InstrProfError>(instrprof_error::unsupported_version);
    }

    TotalSize += H->TotalSize;
    Next += H->TotalSize;
  }

  if (Buffer.getBufferSize() != TotalSize) {
    return make_error<InstrProfError>(instrprof_error::malformed);
  }
  return Error::success();
}

llvm::SmallVector<SegmentEntry> readSegmentEntries(const char *Ptr) {
  using namespace support;

  const uint64_t NumItemsToRead =
      endian::readNext<uint64_t, little, unaligned>(Ptr);
  llvm::SmallVector<SegmentEntry> Items;
  for (uint64_t I = 0; I < NumItemsToRead; I++) {
    Items.push_back(*reinterpret_cast<const SegmentEntry *>(
        Ptr + I * sizeof(SegmentEntry)));
  }
  return Items;
}

llvm::SmallVector<std::pair<uint64_t, MemInfoBlock>>
readMemInfoBlocks(const char *Ptr) {
  using namespace support;

  const uint64_t NumItemsToRead =
      endian::readNext<uint64_t, little, unaligned>(Ptr);
  llvm::SmallVector<std::pair<uint64_t, MemInfoBlock>> Items;
  for (uint64_t I = 0; I < NumItemsToRead; I++) {
    const uint64_t Id = endian::readNext<uint64_t, little, unaligned>(Ptr);
    const MemInfoBlock MIB = *reinterpret_cast<const MemInfoBlock *>(Ptr);
    Items.push_back({Id, MIB});
    // Only advance by the size of the MIB here; the readNext above has
    // already advanced Ptr past the id.
    Ptr += sizeof(MemInfoBlock);
  }
  return Items;
}

CallStackMap readStackInfo(const char *Ptr) {
  using namespace support;

  const uint64_t NumItemsToRead =
      endian::readNext<uint64_t, little, unaligned>(Ptr);
  CallStackMap Items;

  for (uint64_t I = 0; I < NumItemsToRead; I++) {
    const uint64_t StackId = endian::readNext<uint64_t, little, unaligned>(Ptr);
    const uint64_t NumPCs = endian::readNext<uint64_t, little, unaligned>(Ptr);

    SmallVector<uint64_t> CallStack;
    for (uint64_t J = 0; J < NumPCs; J++) {
      CallStack.push_back(endian::readNext<uint64_t, little, unaligned>(Ptr));
    }

    Items[StackId] = CallStack;
  }
  return Items;
}

// Merges the contents of stack information from \p From into \p To. Returns
// true if any stack ids observed previously map to a different set of program
// counter addresses.
bool mergeStackMap(const CallStackMap &From, CallStackMap &To) {
  for (const auto &IdStack : From) {
    auto I = To.find(IdStack.first);
    if (I == To.end()) {
      To[IdStack.first] = IdStack.second;
    } else {
      // Check that the PCs are the same (in order).
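      // A mismatch here means two raw profiles disagree on the PCs recorded
      // for the same stack id; the caller treats this as a malformed profile.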
      if (IdStack.second != I->second)
        return true;
    }
  }
  return false;
}

Error report(Error E, const StringRef Context) {
  return joinErrors(createStringError(inconvertibleErrorCode(), Context),
                    std::move(E));
}

bool isRuntimePath(const StringRef Path) {
  return StringRef(llvm::sys::path::convert_to_slash(Path))
      .contains("memprof/memprof_");
}
} // namespace

Expected<std::unique_ptr<RawMemProfReader>>
RawMemProfReader::create(const Twine &Path, const StringRef ProfiledBinary) {
  auto BufferOr = MemoryBuffer::getFileOrSTDIN(Path);
  if (std::error_code EC = BufferOr.getError())
    return report(errorCodeToError(EC), Path.getSingleStringRef());

  std::unique_ptr<MemoryBuffer> Buffer(BufferOr.get().release());
  if (Error E = checkBuffer(*Buffer))
    return report(std::move(E), Path.getSingleStringRef());

  if (ProfiledBinary.empty())
    return report(
        errorCodeToError(make_error_code(std::errc::invalid_argument)),
        "Path to profiled binary is empty!");

  auto BinaryOr = llvm::object::createBinary(ProfiledBinary);
  if (!BinaryOr) {
    return report(BinaryOr.takeError(), ProfiledBinary);
  }

  std::unique_ptr<RawMemProfReader> Reader(
      new RawMemProfReader(std::move(Buffer), std::move(BinaryOr.get())));
  if (Error E = Reader->initialize()) {
    return std::move(E);
  }
  return std::move(Reader);
}

bool RawMemProfReader::hasFormat(const StringRef Path) {
  auto BufferOr = MemoryBuffer::getFileOrSTDIN(Path);
  if (!BufferOr)
    return false;

  std::unique_ptr<MemoryBuffer> Buffer(BufferOr.get().release());
  return hasFormat(*Buffer);
}

bool RawMemProfReader::hasFormat(const MemoryBuffer &Buffer) {
  if (Buffer.getBufferSize() < sizeof(uint64_t))
    return false;
  // Use an aligned read to sanity check that the buffer was allocated with at
  // least 8-byte alignment.
  const uint64_t Magic = alignedRead(Buffer.getBufferStart());
  return Magic == MEMPROF_RAW_MAGIC_64;
}

void RawMemProfReader::printYAML(raw_ostream &OS) {
  OS << "MemprofProfile:\n";
  // TODO: Update printSummaries to print out the data after the profile has
  // been symbolized and pruned. We can parse some raw profile characteristics
  // from the data buffer for additional information.
  printSummaries(OS);
  // Print out the merged contents of the profiles.
  OS << "  Records:\n";
  for (const auto &Entry : *this) {
    OS << "  -\n";
    OS << "    FunctionGUID: " << Entry.first << "\n";
    Entry.second.print(OS);
  }
}

void RawMemProfReader::printSummaries(raw_ostream &OS) const {
  const char *Next = DataBuffer->getBufferStart();
  while (Next < DataBuffer->getBufferEnd()) {
    auto Summary = computeSummary(Next);
    OS << "  -\n";
    OS << "  Header:\n";
    OS << "    Version: " << Summary.Version << "\n";
    OS << "    TotalSizeBytes: " << Summary.TotalSizeBytes << "\n";
    OS << "    NumSegments: " << Summary.NumSegments << "\n";
    OS << "    NumMibInfo: " << Summary.NumMIBInfo << "\n";
    OS << "    NumStackOffsets: " << Summary.NumStackOffsets << "\n";
    // TODO: Print the build ids once we can record them using the
    // sanitizer_procmaps library for linux.
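
    // Hop to the next profile in the buffer using the total size recorded in
    // this profile's header.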
    auto *H = reinterpret_cast<const Header *>(Next);
    Next += H->TotalSize;
  }
}

Error RawMemProfReader::initialize() {
  const StringRef FileName = Binary.getBinary()->getFileName();

  auto *ElfObject = dyn_cast<object::ELFObjectFileBase>(Binary.getBinary());
  if (!ElfObject) {
    return report(make_error<StringError>(Twine("Not an ELF file: "),
                                          inconvertibleErrorCode()),
                  FileName);
  }

  auto Triple = ElfObject->makeTriple();
  if (!Triple.isX86())
    return report(make_error<StringError>(Twine("Unsupported target: ") +
                                              Triple.getArchName(),
                                          inconvertibleErrorCode()),
                  FileName);

  auto *Object = cast<object::ObjectFile>(Binary.getBinary());
  std::unique_ptr<DIContext> Context = DWARFContext::create(
      *Object, DWARFContext::ProcessDebugRelocations::Process);

  auto SOFOr = symbolize::SymbolizableObjectFile::create(
      Object, std::move(Context), /*UntagAddresses=*/false);
  if (!SOFOr)
    return report(SOFOr.takeError(), FileName);
  Symbolizer = std::move(SOFOr.get());

  if (Error E = readRawProfile())
    return E;

  if (Error E = symbolizeAndFilterStackFrames())
    return E;

  return mapRawProfileToRecords();
}

Error RawMemProfReader::mapRawProfileToRecords() {
  // Hold a mapping from function to each callsite location we encounter within
  // it that is part of some dynamic allocation context. The location is stored
  // as a pointer to a symbolized list of inline frames.
  using LocationPtr = const llvm::SmallVector<FrameId> *;
  llvm::DenseMap<GlobalValue::GUID, llvm::SetVector<LocationPtr>>
      PerFunctionCallSites;

  // Convert the raw profile callstack data into memprof records. While doing
  // so keep track of related contexts so that we can fill these in later.
  for (const auto &Entry : CallstackProfileData) {
    const uint64_t StackId = Entry.first;

    auto It = StackMap.find(StackId);
    if (It == StackMap.end())
      return make_error<InstrProfError>(
          instrprof_error::malformed,
          "memprof callstack record does not contain id: " + Twine(StackId));

    // Construct the symbolized callstack.
    llvm::SmallVector<FrameId> Callstack;
    Callstack.reserve(It->getSecond().size());

    llvm::ArrayRef<uint64_t> Addresses = It->getSecond();
    for (size_t I = 0; I < Addresses.size(); I++) {
      const uint64_t Address = Addresses[I];
      assert(SymbolizedFrame.count(Address) > 0 &&
             "Address not found in SymbolizedFrame map");
      const SmallVector<FrameId> &Frames = SymbolizedFrame[Address];

      assert(!idToFrame(Frames.back()).IsInlineFrame &&
             "The last frame should not be inlined");

      // Record the callsites for each function. Skip the first frame of the
      // first address since it is the allocation site itself that is recorded
      // as an alloc site.
      for (size_t J = 0; J < Frames.size(); J++) {
        if (I == 0 && J == 0)
          continue;
        // We attach the entire bottom-up frame here for the callsite even
        // though we only need the frames up to and including the frame for
        // Frames[J].Function. This will enable better deduplication for
        // compression in the future.
        const GlobalValue::GUID Guid = idToFrame(Frames[J]).Function;
        PerFunctionCallSites[Guid].insert(&Frames);
      }

      // Add all the frames to the current allocation callstack.
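      // Frames for a single address are ordered innermost first (the assert
      // above checks that the outermost frame is not inlined), and addresses
      // run from the leaf upwards, so appending preserves the bottom-up order
      // of the callstack.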
      Callstack.append(Frames.begin(), Frames.end());
    }

    // We attach the memprof record to each function bottom-up, including the
    // first non-inline frame.
    for (size_t I = 0; /*Break out using the condition below*/; I++) {
      const Frame &F = idToFrame(Callstack[I]);
      auto Result =
          FunctionProfileData.insert({F.Function, IndexedMemProfRecord()});
      IndexedMemProfRecord &Record = Result.first->second;
      Record.AllocSites.emplace_back(Callstack, Entry.second);

      if (!F.IsInlineFrame)
        break;
    }
  }

  // Fill in the related callsites per function.
  for (auto I = PerFunctionCallSites.begin(), E = PerFunctionCallSites.end();
       I != E; I++) {
    const GlobalValue::GUID Id = I->first;
    // Some functions may have only callsite data and no allocation data. Here
    // we insert a new entry for callsite data if we need to.
    auto Result = FunctionProfileData.insert({Id, IndexedMemProfRecord()});
    IndexedMemProfRecord &Record = Result.first->second;
    for (LocationPtr Loc : I->getSecond()) {
      Record.CallSites.push_back(*Loc);
    }
  }

  return Error::success();
}

Error RawMemProfReader::symbolizeAndFilterStackFrames() {
  // The specifier to use when symbolization is requested.
  const DILineInfoSpecifier Specifier(
      DILineInfoSpecifier::FileLineInfoKind::RawValue,
      DILineInfoSpecifier::FunctionNameKind::LinkageName);

  // For entries where all PCs in the callstack are discarded, we erase the
  // entry from the stack map.
  llvm::SmallVector<uint64_t> EntriesToErase;
  // We keep track of all prior discarded entries so that we can avoid invoking
  // the symbolizer for such entries.
  llvm::DenseSet<uint64_t> AllVAddrsToDiscard;
  for (auto &Entry : StackMap) {
    for (const uint64_t VAddr : Entry.getSecond()) {
      // Check if we have already symbolized and cached the result, or if we
      // don't want to attempt symbolization since we know this address is bad.
      // In either case the address is skipped; bad addresses are also removed
      // from the current callstack below.
      if (SymbolizedFrame.count(VAddr) > 0 ||
          AllVAddrsToDiscard.contains(VAddr))
        continue;

      Expected<DIInliningInfo> DIOr = Symbolizer->symbolizeInlinedCode(
          getModuleOffset(VAddr), Specifier, /*UseSymbolTable=*/false);
      if (!DIOr)
        return DIOr.takeError();
      DIInliningInfo DI = DIOr.get();

      // Drop frames which we can't symbolize or which belong to the runtime.
      if (DI.getFrame(0).FunctionName == DILineInfo::BadString ||
          isRuntimePath(DI.getFrame(0).FileName)) {
        AllVAddrsToDiscard.insert(VAddr);
        continue;
      }

      for (size_t I = 0, NumFrames = DI.getNumberOfFrames(); I < NumFrames;
           I++) {
        const auto &DIFrame = DI.getFrame(I);
        LLVM_DEBUG(
            // Print out the name to guid mapping for debugging.
            llvm::dbgs() << "FunctionName: " << DIFrame.FunctionName
                         << " GUID: "
                         << IndexedMemProfRecord::getGUID(DIFrame.FunctionName)
                         << "\n";);

        const Frame F(IndexedMemProfRecord::getGUID(DIFrame.FunctionName),
                      DIFrame.Line - DIFrame.StartLine, DIFrame.Column,
                      // Only the last entry is not an inlined location.
                      I != NumFrames - 1);

        const FrameId Id = F.hash();
        IdToFrame.insert({Id, F});
        SymbolizedFrame[VAddr].push_back(Id);
      }
    }

    auto &CallStack = Entry.getSecond();
    CallStack.erase(std::remove_if(CallStack.begin(), CallStack.end(),
                                   [&AllVAddrsToDiscard](const uint64_t A) {
                                     return AllVAddrsToDiscard.contains(A);
                                   }),
                    CallStack.end());
    if (CallStack.empty())
      EntriesToErase.push_back(Entry.getFirst());
  }

  // Drop the entries where the callstack is empty.
  for (const uint64_t Id : EntriesToErase) {
    StackMap.erase(Id);
    CallstackProfileData.erase(Id);
  }

  if (StackMap.empty())
    return make_error<InstrProfError>(
        instrprof_error::malformed,
        "no entries in callstack map after symbolization");

  return Error::success();
}

Error RawMemProfReader::readRawProfile() {
  const char *Next = DataBuffer->getBufferStart();

  while (Next < DataBuffer->getBufferEnd()) {
    auto *Header = reinterpret_cast<const memprof::Header *>(Next);

    // Read in the segment information and check whether it's the same across
    // all profiles in this file.
    const llvm::SmallVector<SegmentEntry> Entries =
        readSegmentEntries(Next + Header->SegmentOffset);
    if (!SegmentInfo.empty() && SegmentInfo != Entries) {
      // We do not expect segment information to change when deserializing from
      // the same binary profile file. This can happen if dynamic libraries are
      // loaded/unloaded between profile dumps.
      return make_error<InstrProfError>(
          instrprof_error::malformed,
          "memprof raw profile has different segment information");
    }
    SegmentInfo.assign(Entries.begin(), Entries.end());

    // Read in the MemInfoBlocks. Merge them based on stack id - we assume that
    // raw profiles in the same binary file are from the same process so the
    // stackdepot ids are the same.
    for (const auto &Value : readMemInfoBlocks(Next + Header->MIBOffset)) {
      if (CallstackProfileData.count(Value.first)) {
        CallstackProfileData[Value.first].Merge(Value.second);
      } else {
        CallstackProfileData[Value.first] = Value.second;
      }
    }

    // Read in the callstack for each id. For multiple raw profiles in the same
    // file, we expect the callstack to be identical for a given id.
    const CallStackMap CSM = readStackInfo(Next + Header->StackOffset);
    if (StackMap.empty()) {
      StackMap = CSM;
    } else {
      if (mergeStackMap(CSM, StackMap))
        return make_error<InstrProfError>(
            instrprof_error::malformed,
            "memprof raw profile got different call stack for same id");
    }

    Next += Header->TotalSize;
  }

  return Error::success();
}

object::SectionedAddress
RawMemProfReader::getModuleOffset(const uint64_t VirtualAddress) {
  LLVM_DEBUG({
    SegmentEntry *ContainingSegment = nullptr;
    for (auto &SE : SegmentInfo) {
      if (VirtualAddress > SE.Start && VirtualAddress <= SE.End) {
        ContainingSegment = &SE;
      }
    }

    // Ensure that the virtual address is valid.
    assert(ContainingSegment && "Could not find a segment entry");
  });

  // TODO: Compute the file offset based on the maps and program headers. For
  // now this only works for non-PIE binaries.
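  // For a non-PIE binary the profiled virtual addresses match the addresses
  // in the object file, so we can pass the address through unchanged.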
  return object::SectionedAddress{VirtualAddress};
}

Error RawMemProfReader::readNextRecord(GuidMemProfRecordPair &GuidRecord) {
  if (FunctionProfileData.empty())
    return make_error<InstrProfError>(instrprof_error::empty_raw_profile);

  if (Iter == FunctionProfileData.end())
    return make_error<InstrProfError>(instrprof_error::eof);

  auto IdToFrameCallback = [this](const FrameId Id) {
    return this->idToFrame(Id);
  };
  const IndexedMemProfRecord &IndexedRecord = Iter->second;
  GuidRecord = {Iter->first, MemProfRecord(IndexedRecord, IdToFrameCallback)};
  Iter++;
  return Error::success();
}
} // namespace memprof
} // namespace llvm