//=-- InstrProfWriter.cpp - Instrumented profiling writer -------------------=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains support for writing profiling data for clang's
// instrumentation based PGO and coverage.
//
//===----------------------------------------------------------------------===//

#include "llvm/ProfileData/InstrProfWriter.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/OnDiskHashTable.h"

using namespace llvm;

namespace {
static support::endianness ValueProfDataEndianness = support::little;

class InstrProfRecordTrait {
public:
  typedef StringRef key_type;
  typedef StringRef key_type_ref;

  typedef const InstrProfWriter::ProfilingData *const data_type;
  typedef const InstrProfWriter::ProfilingData *const data_type_ref;

  typedef uint64_t hash_value_type;
  typedef uint64_t offset_type;

  static hash_value_type ComputeHash(key_type_ref K) {
    return IndexedInstrProf::ComputeHash(IndexedInstrProf::HashType, K);
  }

  static std::pair<offset_type, offset_type>
  EmitKeyDataLength(raw_ostream &Out, key_type_ref K, data_type_ref V) {
    using namespace llvm::support;
    endian::Writer<little> LE(Out);

    offset_type N = K.size();
    LE.write<offset_type>(N);

    offset_type M = 0;
    for (const auto &ProfileData : *V) {
      const InstrProfRecord &ProfRecord = ProfileData.second;
      M += sizeof(uint64_t); // The function hash
      M += sizeof(uint64_t); // The size of the Counts vector
      M += ProfRecord.Counts.size() * sizeof(uint64_t);

      // Value data
      M += IndexedInstrProf::ValueProfData::getSize(ProfileData.second);
    }
    LE.write<offset_type>(M);

    return std::make_pair(N, M);
  }

  static void EmitKey(raw_ostream &Out, key_type_ref K, offset_type N) {
    Out.write(K.data(), N);
  }

  static void EmitData(raw_ostream &Out, key_type_ref, data_type_ref V,
                       offset_type) {
    using namespace llvm::support;
    endian::Writer<little> LE(Out);
    for (const auto &ProfileData : *V) {
      const InstrProfRecord &ProfRecord = ProfileData.second;

      LE.write<uint64_t>(ProfileData.first); // Function hash
      LE.write<uint64_t>(ProfRecord.Counts.size());
      for (uint64_t I : ProfRecord.Counts)
        LE.write<uint64_t>(I);

      // Write value data
      std::unique_ptr<IndexedInstrProf::ValueProfData> VDataPtr =
          IndexedInstrProf::ValueProfData::serializeFrom(ProfileData.second);
      uint32_t S = VDataPtr->getSize();
      VDataPtr->swapBytesFromHost(ValueProfDataEndianness);
      Out.write((const char *)VDataPtr.get(), S);
    }
  }
};
}

static std::error_code combineInstrProfRecords(InstrProfRecord &Dest,
                                               InstrProfRecord &Source,
                                               uint64_t &MaxFunctionCount) {
  // If the number of counters doesn't match we either have bad data
  // or a hash collision.
  if (Dest.Counts.size() != Source.Counts.size())
    return instrprof_error::count_mismatch;

  for (size_t I = 0, E = Source.Counts.size(); I < E; ++I) {
    if (Dest.Counts[I] + Source.Counts[I] < Dest.Counts[I])
      return instrprof_error::counter_overflow;
    Dest.Counts[I] += Source.Counts[I];
  }

  for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind) {
    if (std::error_code EC = Dest.mergeValueProfData(Kind, Source))
      return EC;
  }

  // We keep track of the max function count as we go for simplicity.
  if (Dest.Counts[0] > MaxFunctionCount)
    MaxFunctionCount = Dest.Counts[0];

  return instrprof_error::success;
}

// Internal interface for testing purposes only.
void InstrProfWriter::setValueProfDataEndianness(
    support::endianness Endianness) {
  ValueProfDataEndianness = Endianness;
}

void InstrProfWriter::updateStringTableReferences(InstrProfRecord &I) {
  I.updateStrings(&StringTable);
}

std::error_code InstrProfWriter::addRecord(InstrProfRecord &&I) {
  updateStringTableReferences(I);
  auto &ProfileDataMap = FunctionData[I.Name];

  auto Where = ProfileDataMap.find(I.Hash);
  if (Where == ProfileDataMap.end()) {
    // We've never seen a function with this name and hash, add it.
    ProfileDataMap[I.Hash] = I;

    // We keep track of the max function count as we go for simplicity.
    if (I.Counts[0] > MaxFunctionCount)
      MaxFunctionCount = I.Counts[0];
    return instrprof_error::success;
  }

  // We're updating a function we've seen before.
  return combineInstrProfRecords(Where->second, I, MaxFunctionCount);
}

// Returns the file offset of the reserved 'HashOffset' header field and the
// offset at which the hash table actually starts, so callers can back patch.
std::pair<uint64_t, uint64_t> InstrProfWriter::writeImpl(raw_ostream &OS) {
  OnDiskChainedHashTableGenerator<InstrProfRecordTrait> Generator;

  // Populate the hash table generator.
  for (const auto &I : FunctionData)
    Generator.insert(I.getKey(), &I.getValue());

  using namespace llvm::support;
  endian::Writer<little> LE(OS);

  // Write the header.
  IndexedInstrProf::Header Header;
  Header.Magic = IndexedInstrProf::Magic;
  Header.Version = IndexedInstrProf::Version;
  Header.MaxFunctionCount = MaxFunctionCount;
  Header.HashType = static_cast<uint64_t>(IndexedInstrProf::HashType);
  Header.HashOffset = 0;
  int N = sizeof(IndexedInstrProf::Header) / sizeof(uint64_t);

  // Write out all the fields except 'HashOffset'. We need to remember the
  // offset of that field to allow back patching later.
  for (int I = 0; I < N - 1; I++)
    LE.write<uint64_t>(reinterpret_cast<uint64_t *>(&Header)[I]);

  // Save the location where the hash table start offset will be written.
  uint64_t HashTableStartLoc = OS.tell();
  // Reserve the space for the HashOffset field.
  LE.write<uint64_t>(0);
  // Write the hash table.
  uint64_t HashTableStart = Generator.Emit(OS);

  return std::make_pair(HashTableStartLoc, HashTableStart);
}

void InstrProfWriter::write(raw_fd_ostream &OS) {
  // Write the hash table.
  auto TableStart = writeImpl(OS);

  // Go back and fill in the hash table start.
  using namespace support;
  OS.seek(TableStart.first);
  // Now patch the HashOffset field previously reserved.
  endian::Writer<little>(OS).write<uint64_t>(TableStart.second);
}

std::unique_ptr<MemoryBuffer> InstrProfWriter::writeBuffer() {
  std::string Data;
  llvm::raw_string_ostream OS(Data);
  // Write the hash table.
  auto TableStart = writeImpl(OS);
  OS.flush();

  // Go back and fill in the hash table start.
  using namespace support;
  uint64_t Bytes = endian::byte_swap<uint64_t, little>(TableStart.second);
  Data.replace(TableStart.first, sizeof(uint64_t), (const char *)&Bytes,
               sizeof(uint64_t));

  // Return this in an aligned memory buffer.
  return MemoryBuffer::getMemBufferCopy(Data);
}
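
//===----------------------------------------------------------------------===//
// Usage sketch (illustrative only, not part of the writer itself): how a
// client might drive InstrProfWriter using the interfaces defined above.
// Only addRecord(), write(), and writeBuffer() are taken from this file; the
// way the InstrProfRecord is constructed and the output-stream setup are
// assumptions and may differ in a real client.
//
//   InstrProfWriter Writer;
//
//   // Hypothetical record: function "foo", structural hash 0x1234, and two
//   // execution counters.
//   InstrProfRecord Record("foo", 0x1234, {10, 20});
//   if (std::error_code EC = Writer.addRecord(std::move(Record)))
//     report_fatal_error(EC.message());
//
//   // Either stream the indexed profile directly to a file...
//   std::error_code EC;
//   raw_fd_ostream Out("default.profdata", EC, sys::fs::F_None);
//   if (!EC)
//     Writer.write(Out);
//
//   // ...or keep the serialized profile in memory.
//   std::unique_ptr<MemoryBuffer> Buf = Writer.writeBuffer();
//===----------------------------------------------------------------------===//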