//===- RawMemProfReader.cpp - Instrumented memory profiling reader --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains support for reading MemProf profiling data.
//
//===----------------------------------------------------------------------===//

#include <algorithm>
#include <cstdint>
#include <type_traits>

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/DebugInfo/DWARF/DWARFContext.h"
#include "llvm/DebugInfo/Symbolize/SymbolizableModule.h"
#include "llvm/DebugInfo/Symbolize/SymbolizableObjectFile.h"
#include "llvm/Object/Binary.h"
#include "llvm/Object/ELFObjectFile.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/ProfileData/MemProf.h"
#include "llvm/ProfileData/MemProfData.inc"
#include "llvm/ProfileData/RawMemProfReader.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Path.h"

#define DEBUG_TYPE "memprof"

namespace llvm {
namespace memprof {
namespace {

struct Summary {
  uint64_t Version;
  uint64_t TotalSizeBytes;
  uint64_t NumSegments;
  uint64_t NumMIBInfo;
  uint64_t NumStackOffsets;
};

template <class T = uint64_t> inline T alignedRead(const char *Ptr) {
  static_assert(std::is_pod<T>::value, "Not a pod type.");
  assert(reinterpret_cast<size_t>(Ptr) % sizeof(T) == 0 && "Unaligned Read");
  return *reinterpret_cast<const T *>(Ptr);
}

Summary computeSummary(const char *Start) {
  auto *H = reinterpret_cast<const Header *>(Start);

  // Check alignment while reading the number of items in each section.
  return Summary{
      H->Version,
      H->TotalSize,
      alignedRead(Start + H->SegmentOffset),
      alignedRead(Start + H->MIBOffset),
      alignedRead(Start + H->StackOffset),
  };
}

Error checkBuffer(const MemoryBuffer &Buffer) {
  if (!RawMemProfReader::hasFormat(Buffer))
    return make_error<InstrProfError>(instrprof_error::bad_magic);

  if (Buffer.getBufferSize() == 0)
    return make_error<InstrProfError>(instrprof_error::empty_raw_profile);

  if (Buffer.getBufferSize() < sizeof(Header)) {
    return make_error<InstrProfError>(instrprof_error::truncated);
  }

  // The size of the buffer can be > header total size since we allow repeated
  // serialization of memprof profiles to the same file.
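  // Schematically, a buffer holding two appended profiles looks like:
  //   | Header_1 | Segments_1 | MIBs_1 | Stacks_1 | Header_2 | ... |
  // where each header's TotalSize covers exactly one profile, so the sizes
  // must sum to the buffer size.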
  uint64_t TotalSize = 0;
  const char *Next = Buffer.getBufferStart();
  while (Next < Buffer.getBufferEnd()) {
    auto *H = reinterpret_cast<const Header *>(Next);
    if (H->Version != MEMPROF_RAW_VERSION) {
      return make_error<InstrProfError>(instrprof_error::unsupported_version);
    }

    TotalSize += H->TotalSize;
    Next += H->TotalSize;
  }

  if (Buffer.getBufferSize() != TotalSize) {
    return make_error<InstrProfError>(instrprof_error::malformed);
  }
  return Error::success();
}

llvm::SmallVector<SegmentEntry> readSegmentEntries(const char *Ptr) {
  using namespace support;

  const uint64_t NumItemsToRead =
      endian::readNext<uint64_t, little, unaligned>(Ptr);
  llvm::SmallVector<SegmentEntry> Items;
  for (uint64_t I = 0; I < NumItemsToRead; I++) {
    Items.push_back(*reinterpret_cast<const SegmentEntry *>(
        Ptr + I * sizeof(SegmentEntry)));
  }
  return Items;
}

llvm::SmallVector<std::pair<uint64_t, MemInfoBlock>>
readMemInfoBlocks(const char *Ptr) {
  using namespace support;

  const uint64_t NumItemsToRead =
      endian::readNext<uint64_t, little, unaligned>(Ptr);
  llvm::SmallVector<std::pair<uint64_t, MemInfoBlock>> Items;
  for (uint64_t I = 0; I < NumItemsToRead; I++) {
    const uint64_t Id = endian::readNext<uint64_t, little, unaligned>(Ptr);
    const MemInfoBlock MIB = *reinterpret_cast<const MemInfoBlock *>(Ptr);
    Items.push_back({Id, MIB});
    // Only increment by the size of the MIB since readNext has already
    // advanced Ptr past the id.
    Ptr += sizeof(MemInfoBlock);
  }
  return Items;
}

CallStackMap readStackInfo(const char *Ptr) {
  using namespace support;

  const uint64_t NumItemsToRead =
      endian::readNext<uint64_t, little, unaligned>(Ptr);
  CallStackMap Items;

  for (uint64_t I = 0; I < NumItemsToRead; I++) {
    const uint64_t StackId = endian::readNext<uint64_t, little, unaligned>(Ptr);
    const uint64_t NumPCs = endian::readNext<uint64_t, little, unaligned>(Ptr);

    SmallVector<uint64_t> CallStack;
    for (uint64_t J = 0; J < NumPCs; J++) {
      CallStack.push_back(endian::readNext<uint64_t, little, unaligned>(Ptr));
    }

    Items[StackId] = CallStack;
  }
  return Items;
}

// Merges the contents of stack information in \p From to \p To. Returns true
// if any stack ids observed previously map to a different set of program
// counter addresses.
bool mergeStackMap(const CallStackMap &From, CallStackMap &To) {
  for (const auto &IdStack : From) {
    auto I = To.find(IdStack.first);
    if (I == To.end()) {
      To[IdStack.first] = IdStack.second;
    } else {
      // Check that the PCs are the same (in order).
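      // SmallVector's operator!= compares element by element, so stacks with
      // the same PCs in a different order also count as a mismatch.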
      if (IdStack.second != I->second)
        return true;
    }
  }
  return false;
}

Error report(Error E, const StringRef Context) {
  return joinErrors(createStringError(inconvertibleErrorCode(), Context),
                    std::move(E));
}

bool isRuntimePath(const StringRef Path) {
  return StringRef(llvm::sys::path::convert_to_slash(Path))
      .contains("memprof/memprof_");
}
} // namespace

Expected<std::unique_ptr<RawMemProfReader>>
RawMemProfReader::create(const Twine &Path, const StringRef ProfiledBinary) {
  auto BufferOr = MemoryBuffer::getFileOrSTDIN(Path);
  if (std::error_code EC = BufferOr.getError())
    return report(errorCodeToError(EC), Path.getSingleStringRef());

  std::unique_ptr<MemoryBuffer> Buffer(BufferOr.get().release());
  if (Error E = checkBuffer(*Buffer))
    return report(std::move(E), Path.getSingleStringRef());

  if (ProfiledBinary.empty())
    return report(
        errorCodeToError(make_error_code(std::errc::invalid_argument)),
        "Path to profiled binary is empty!");

  auto BinaryOr = llvm::object::createBinary(ProfiledBinary);
  if (!BinaryOr) {
    return report(BinaryOr.takeError(), ProfiledBinary);
  }

  std::unique_ptr<RawMemProfReader> Reader(
      new RawMemProfReader(std::move(Buffer), std::move(BinaryOr.get())));
  if (Error E = Reader->initialize()) {
    return std::move(E);
  }
  return std::move(Reader);
}

bool RawMemProfReader::hasFormat(const StringRef Path) {
  auto BufferOr = MemoryBuffer::getFileOrSTDIN(Path);
  if (!BufferOr)
    return false;

  std::unique_ptr<MemoryBuffer> Buffer(BufferOr.get().release());
  return hasFormat(*Buffer);
}

bool RawMemProfReader::hasFormat(const MemoryBuffer &Buffer) {
  if (Buffer.getBufferSize() < sizeof(uint64_t))
    return false;
  // Aligned read to sanity check that the buffer was allocated with at least
  // 8-byte alignment.
  const uint64_t Magic = alignedRead(Buffer.getBufferStart());
  return Magic == MEMPROF_RAW_MAGIC_64;
}

void RawMemProfReader::printYAML(raw_ostream &OS) {
  OS << "MemprofProfile:\n";
  // TODO: Update printSummaries to print out the data after the profile has
  // been symbolized and pruned. We can parse some raw profile characteristics
  // from the data buffer for additional information.
  printSummaries(OS);
  // Print out the merged contents of the profiles.
  OS << "  Records:\n";
  for (const auto &Entry : *this) {
    OS << "  -\n";
    OS << "    FunctionGUID: " << Entry.first << "\n";
    Entry.second.print(OS);
  }
}

void RawMemProfReader::printSummaries(raw_ostream &OS) const {
  const char *Next = DataBuffer->getBufferStart();
  while (Next < DataBuffer->getBufferEnd()) {
    auto Summary = computeSummary(Next);
    OS << "  -\n";
    OS << "  Header:\n";
    OS << "    Version: " << Summary.Version << "\n";
    OS << "    TotalSizeBytes: " << Summary.TotalSizeBytes << "\n";
    OS << "    NumSegments: " << Summary.NumSegments << "\n";
    OS << "    NumMibInfo: " << Summary.NumMIBInfo << "\n";
    OS << "    NumStackOffsets: " << Summary.NumStackOffsets << "\n";
    // TODO: Print the build ids once we can record them using the
    // sanitizer_procmaps library for linux.
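    // Advance to the next profile in the buffer, if any; headers are chained
    // by their TotalSize fields.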
    auto *H = reinterpret_cast<const Header *>(Next);
    Next += H->TotalSize;
  }
}

Error RawMemProfReader::initialize() {
  const StringRef FileName = Binary.getBinary()->getFileName();

  auto *ElfObject = dyn_cast<object::ELFObjectFileBase>(Binary.getBinary());
  if (!ElfObject) {
    return report(make_error<StringError>(Twine("Not an ELF file: "),
                                          inconvertibleErrorCode()),
                  FileName);
  }

  auto Triple = ElfObject->makeTriple();
  if (!Triple.isX86())
    return report(make_error<StringError>(Twine("Unsupported target: ") +
                                              Triple.getArchName(),
                                          inconvertibleErrorCode()),
                  FileName);

  auto *Object = cast<object::ObjectFile>(Binary.getBinary());
  std::unique_ptr<DIContext> Context = DWARFContext::create(
      *Object, DWARFContext::ProcessDebugRelocations::Process);

  auto SOFOr = symbolize::SymbolizableObjectFile::create(
      Object, std::move(Context), /*UntagAddresses=*/false);
  if (!SOFOr)
    return report(SOFOr.takeError(), FileName);
  Symbolizer = std::move(SOFOr.get());

  if (Error E = readRawProfile())
    return E;

  if (Error E = symbolizeAndFilterStackFrames())
    return E;

  return mapRawProfileToRecords();
}

Error RawMemProfReader::mapRawProfileToRecords() {
  // Hold a mapping from function to each callsite location we encounter within
  // it that is part of some dynamic allocation context. The location is stored
  // as a pointer to a symbolized list of inline frames.
  using LocationPtr = const llvm::SmallVector<MemProfRecord::Frame> *;
  llvm::DenseMap<GlobalValue::GUID, llvm::SetVector<LocationPtr>>
      PerFunctionCallSites;

  // Convert the raw profile callstack data into memprof records. While doing
  // so, keep track of related contexts so that we can fill them in later.
  for (const auto &Entry : CallstackProfileData) {
    const uint64_t StackId = Entry.first;

    auto It = StackMap.find(StackId);
    if (It == StackMap.end())
      return make_error<InstrProfError>(
          instrprof_error::malformed,
          "memprof callstack record does not contain id: " + Twine(StackId));

    // Construct the symbolized callstack.
    llvm::SmallVector<MemProfRecord::Frame> Callstack;
    Callstack.reserve(It->getSecond().size());

    llvm::ArrayRef<uint64_t> Addresses = It->getSecond();
    for (size_t I = 0; I < Addresses.size(); I++) {
      const uint64_t Address = Addresses[I];
      assert(SymbolizedFrame.count(Address) > 0 &&
             "Address not found in SymbolizedFrame map");
      const SmallVector<MemProfRecord::Frame> &Frames =
          SymbolizedFrame[Address];

      assert(!Frames.back().IsInlineFrame &&
             "The last frame should not be inlined");

      // Record the callsites for each function. Skip the first frame of the
      // first address since it is the allocation site itself, which is
      // recorded as an alloc site rather than a callsite.
      for (size_t J = 0; J < Frames.size(); J++) {
        if (I == 0 && J == 0)
          continue;
        // We attach the entire bottom-up list of inline frames for the
        // callsite even though we only need the frames up to and including
        // the one for Frames[J].Function. This enables better deduplication
        // for compression in the future.
        PerFunctionCallSites[Frames[J].Function].insert(&Frames);
      }

      // Add all the frames to the current allocation callstack.
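      // symbolizeInlinedCode returns frames innermost-first, so appending the
      // per-address frame lists keeps the full callstack in bottom-up
      // (leaf-to-root) order.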
      Callstack.append(Frames.begin(), Frames.end());
    }

    // Attach the memprof record to each function in the callstack, bottom-up,
    // up to and including the first non-inline frame.
    for (size_t I = 0; /*Break out using the condition below*/; I++) {
      auto Result =
          FunctionProfileData.insert({Callstack[I].Function, MemProfRecord()});
      MemProfRecord &Record = Result.first->second;
      Record.AllocSites.emplace_back(Callstack, Entry.second);

      if (!Callstack[I].IsInlineFrame)
        break;
    }
  }

  // Fill in the related callsites per function.
  for (auto I = PerFunctionCallSites.begin(), E = PerFunctionCallSites.end();
       I != E; I++) {
    const GlobalValue::GUID Id = I->first;
    // Some functions may have only callsite data and no allocation data. Here
    // we insert a new entry for callsite data if we need to.
    auto Result = FunctionProfileData.insert({Id, MemProfRecord()});
    MemProfRecord &Record = Result.first->second;
    for (LocationPtr Loc : I->getSecond()) {
      Record.CallSites.push_back(*Loc);
    }
  }

  return Error::success();
}

Error RawMemProfReader::symbolizeAndFilterStackFrames() {
  // The specifier to use when symbolization is requested.
  const DILineInfoSpecifier Specifier(
      DILineInfoSpecifier::FileLineInfoKind::RawValue,
      DILineInfoSpecifier::FunctionNameKind::LinkageName);

  // For entries where all PCs in the callstack are discarded, we erase the
  // entry from the stack map.
  llvm::SmallVector<uint64_t> EntriesToErase;
  // We keep track of all prior discarded entries so that we can avoid invoking
  // the symbolizer for such entries.
  llvm::DenseSet<uint64_t> AllVAddrsToDiscard;
  for (auto &Entry : StackMap) {
    for (const uint64_t VAddr : Entry.getSecond()) {
      // Check if we have already symbolized and cached the result, or if we
      // don't want to attempt symbolization since we know this address is bad;
      // in that case the address is also removed from the current callstack.
      if (SymbolizedFrame.count(VAddr) > 0 ||
          AllVAddrsToDiscard.contains(VAddr))
        continue;

      Expected<DIInliningInfo> DIOr = Symbolizer->symbolizeInlinedCode(
          getModuleOffset(VAddr), Specifier, /*UseSymbolTable=*/false);
      if (!DIOr)
        return DIOr.takeError();
      DIInliningInfo DI = DIOr.get();

      // Drop frames which we cannot symbolize or which belong to the runtime.
      if (DI.getFrame(0).FunctionName == DILineInfo::BadString ||
          isRuntimePath(DI.getFrame(0).FileName)) {
        AllVAddrsToDiscard.insert(VAddr);
        continue;
      }

      for (size_t I = 0, NumFrames = DI.getNumberOfFrames(); I < NumFrames;
           I++) {
        const auto &Frame = DI.getFrame(I);
        LLVM_DEBUG(
            // Print out the name to guid mapping for debugging.
            llvm::dbgs() << "FunctionName: " << Frame.FunctionName
                         << " GUID: "
                         << MemProfRecord::getGUID(Frame.FunctionName)
                         << "\n";);
        SymbolizedFrame[VAddr].emplace_back(
            MemProfRecord::getGUID(Frame.FunctionName),
            Frame.Line - Frame.StartLine, Frame.Column,
            // Only the last entry is not an inlined location.
            I != NumFrames - 1);
      }
    }

    auto &CallStack = Entry.getSecond();
    CallStack.erase(std::remove_if(CallStack.begin(), CallStack.end(),
                                   [&AllVAddrsToDiscard](const uint64_t A) {
                                     return AllVAddrsToDiscard.contains(A);
                                   }),
                    CallStack.end());
    if (CallStack.empty())
      EntriesToErase.push_back(Entry.getFirst());
  }

  // Drop the entries where the callstack is empty.
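  // The corresponding MemInfoBlock data is dropped too; without any
  // symbolized frames it can no longer be attributed to a function.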
  for (const uint64_t Id : EntriesToErase) {
    StackMap.erase(Id);
    CallstackProfileData.erase(Id);
  }

  if (StackMap.empty())
    return make_error<InstrProfError>(
        instrprof_error::malformed,
        "no entries in callstack map after symbolization");

  return Error::success();
}

Error RawMemProfReader::readRawProfile() {
  const char *Next = DataBuffer->getBufferStart();

  while (Next < DataBuffer->getBufferEnd()) {
    auto *Header = reinterpret_cast<const memprof::Header *>(Next);

    // Read in the segment information and check whether it is the same across
    // all raw profiles in this file.
    const llvm::SmallVector<SegmentEntry> Entries =
        readSegmentEntries(Next + Header->SegmentOffset);
    if (!SegmentInfo.empty() && SegmentInfo != Entries) {
      // We do not expect segment information to change when deserializing from
      // the same binary profile file. This can happen if dynamic libraries are
      // loaded/unloaded between profile dumps.
      return make_error<InstrProfError>(
          instrprof_error::malformed,
          "memprof raw profile has different segment information");
    }
    SegmentInfo.assign(Entries.begin(), Entries.end());

    // Read in the MemInfoBlocks. Merge them based on stack id - we assume that
    // raw profiles in the same binary file are from the same process, so the
    // stackdepot ids are the same.
    for (const auto &Value : readMemInfoBlocks(Next + Header->MIBOffset)) {
      if (CallstackProfileData.count(Value.first)) {
        CallstackProfileData[Value.first].Merge(Value.second);
      } else {
        CallstackProfileData[Value.first] = Value.second;
      }
    }

    // Read in the callstack for each id. For multiple raw profiles in the
    // same file, we expect the callstack for a given id to be the same.
    const CallStackMap CSM = readStackInfo(Next + Header->StackOffset);
    if (StackMap.empty()) {
      StackMap = CSM;
    } else {
      if (mergeStackMap(CSM, StackMap))
        return make_error<InstrProfError>(
            instrprof_error::malformed,
            "memprof raw profile got different call stack for same id");
    }

    Next += Header->TotalSize;
  }

  return Error::success();
}

object::SectionedAddress
RawMemProfReader::getModuleOffset(const uint64_t VirtualAddress) {
  LLVM_DEBUG({
    SegmentEntry *ContainingSegment = nullptr;
    for (auto &SE : SegmentInfo) {
      if (VirtualAddress > SE.Start && VirtualAddress <= SE.End) {
        ContainingSegment = &SE;
      }
    }

    // Ensure that the virtual address is valid.
    assert(ContainingSegment && "Could not find a segment entry");
  });

  // TODO: Compute the file offset based on the maps and program headers. For
  // now this only works for non-PIE binaries.
  return object::SectionedAddress{VirtualAddress};
}

Error RawMemProfReader::readNextRecord(GuidMemProfRecordPair &GuidRecord) {
  if (FunctionProfileData.empty())
    return make_error<InstrProfError>(instrprof_error::empty_raw_profile);

  if (Iter == FunctionProfileData.end())
    return make_error<InstrProfError>(instrprof_error::eof);

  GuidRecord = {Iter->first, Iter->second};
  Iter++;
  return Error::success();
}
} // namespace memprof
} // namespace llvm
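
// Example usage, as a client such as `llvm-profdata show` might drive this
// reader (a minimal sketch; error handling shortened for brevity):
//
//   auto ReaderOr = memprof::RawMemProfReader::create(ProfilePath, BinaryPath);
//   if (!ReaderOr)
//     return ReaderOr.takeError();
//   std::unique_ptr<memprof::RawMemProfReader> Reader = std::move(*ReaderOr);
//   Reader->printYAML(llvm::outs());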