//===- InstrProfWriter.cpp - Instrumented profiling writer ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains support for writing profiling data for clang's
// instrumentation based PGO and coverage.
//
//===----------------------------------------------------------------------===//

#include "llvm/ProfileData/InstrProfWriter.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/ProfileSummary.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/ProfileData/MemProf.h"
#include "llvm/ProfileData/ProfileCommon.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/OnDiskHashTable.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdint>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

// A struct to define how the data stream should be patched. For Indexed
// profiling, only the uint64_t data type is needed.
struct PatchItem {
  uint64_t Pos; // Where to patch.
  uint64_t *D;  // Pointer to an array of source data.
  int N;        // Number of elements in \c D array.
};

namespace llvm {

// A wrapper class to abstract writer stream with support of bytes
// back patching.
class ProfOStream {
public:
  ProfOStream(raw_fd_ostream &FD)
      : IsFDOStream(true), OS(FD), LE(FD, support::little) {}
  ProfOStream(raw_string_ostream &STR)
      : IsFDOStream(false), OS(STR), LE(STR, support::little) {}

  uint64_t tell() { return OS.tell(); }
  void write(uint64_t V) { LE.write<uint64_t>(V); }

  // \c patch can only be called when all data is written and flushed.
  // For raw_string_ostream, the patch is done on the target string
  // directly and it won't be reflected in the stream's internal buffer.
  void patch(PatchItem *P, int NItems) {
    using namespace support;

    if (IsFDOStream) {
      raw_fd_ostream &FDOStream = static_cast<raw_fd_ostream &>(OS);
      const uint64_t LastPos = FDOStream.tell();
      for (int K = 0; K < NItems; K++) {
        FDOStream.seek(P[K].Pos);
        for (int I = 0; I < P[K].N; I++)
          write(P[K].D[I]);
      }
      // Reset the stream to the last position after patching so that users
      // don't accidentally overwrite data. This makes it consistent with
      // the string stream below which replaces the data directly.
      FDOStream.seek(LastPos);
    } else {
      raw_string_ostream &SOStream = static_cast<raw_string_ostream &>(OS);
      std::string &Data = SOStream.str(); // with flush
      for (int K = 0; K < NItems; K++) {
        for (int I = 0; I < P[K].N; I++) {
          uint64_t Bytes = endian::byte_swap<uint64_t, little>(P[K].D[I]);
          Data.replace(P[K].Pos + I * sizeof(uint64_t), sizeof(uint64_t),
                       (const char *)&Bytes, sizeof(uint64_t));
        }
      }
    }
  }

  // If \c OS is an instance of \c raw_fd_ostream, this field will be
  // true. Otherwise, \c OS will be a raw_string_ostream.
  bool IsFDOStream;
  raw_ostream &OS;
  support::endian::Writer LE;
};

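// Typical use of \c ProfOStream::patch, as in \c writeImpl below
// (hypothetical sketch): reserve a field by writing a placeholder,
// remember its offset, and back-patch it once the real value is known.
//   uint64_t FieldPos = OS.tell();
//   OS.write(0); // Placeholder.
//   ...
//   PatchItem Item = {FieldPos, &Value, 1};
//   OS.patch(&Item, 1);

// Writer trait for the on-disk chained hash table of profile records.
// \c OnDiskChainedHashTableGenerator calls \c ComputeHash,
// \c EmitKeyDataLength, \c EmitKey, and \c EmitData to serialize each
// (function name, per-hash record map) entry.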
class InstrProfRecordWriterTrait {
public:
  using key_type = StringRef;
  using key_type_ref = StringRef;

  using data_type = const InstrProfWriter::ProfilingData *const;
  using data_type_ref = const InstrProfWriter::ProfilingData *const;

  using hash_value_type = uint64_t;
  using offset_type = uint64_t;

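  // Endianness to use when writing value profile data. Little-endian by
  // default; tests may override it via setValueProfDataEndianness().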
  support::endianness ValueProfDataEndianness = support::little;
  InstrProfSummaryBuilder *SummaryBuilder;
  InstrProfSummaryBuilder *CSSummaryBuilder;

  InstrProfRecordWriterTrait() = default;

  static hash_value_type ComputeHash(key_type_ref K) {
    return IndexedInstrProf::ComputeHash(K);
  }

  static std::pair<offset_type, offset_type>
  EmitKeyDataLength(raw_ostream &Out, key_type_ref K, data_type_ref V) {
    using namespace support;

    endian::Writer LE(Out, little);

    offset_type N = K.size();
    LE.write<offset_type>(N);

    offset_type M = 0;
    for (const auto &ProfileData : *V) {
      const InstrProfRecord &ProfRecord = ProfileData.second;
      M += sizeof(uint64_t); // The function hash
      M += sizeof(uint64_t); // The size of the Counts vector
      M += ProfRecord.Counts.size() * sizeof(uint64_t);

      // Value data
      M += ValueProfData::getSize(ProfileData.second);
    }
    LE.write<offset_type>(M);

    return std::make_pair(N, M);
  }

  void EmitKey(raw_ostream &Out, key_type_ref K, offset_type N) {
    Out.write(K.data(), N);
  }

  void EmitData(raw_ostream &Out, key_type_ref, data_type_ref V, offset_type) {
    using namespace support;

    endian::Writer LE(Out, little);
    for (const auto &ProfileData : *V) {
      const InstrProfRecord &ProfRecord = ProfileData.second;
      if (NamedInstrProfRecord::hasCSFlagInHash(ProfileData.first))
        CSSummaryBuilder->addRecord(ProfRecord);
      else
        SummaryBuilder->addRecord(ProfRecord);

      LE.write<uint64_t>(ProfileData.first); // Function hash
      LE.write<uint64_t>(ProfRecord.Counts.size());
      for (uint64_t I : ProfRecord.Counts)
        LE.write<uint64_t>(I);

      // Write value data
      std::unique_ptr<ValueProfData> VDataPtr =
          ValueProfData::serializeFrom(ProfileData.second);
      uint32_t S = VDataPtr->getSize();
      VDataPtr->swapBytesFromHost(ValueProfDataEndianness);
      Out.write((const char *)VDataPtr.get(), S);
    }
  }
};

} // end namespace llvm

InstrProfWriter::InstrProfWriter(bool Sparse)
    : Sparse(Sparse), InfoObj(new InstrProfRecordWriterTrait()) {}

InstrProfWriter::~InstrProfWriter() { delete InfoObj; }

// Internal interface for testing purposes only.
void InstrProfWriter::setValueProfDataEndianness(
    support::endianness Endianness) {
  InfoObj->ValueProfDataEndianness = Endianness;
}

void InstrProfWriter::setOutputSparse(bool Sparse) {
  this->Sparse = Sparse;
}

void InstrProfWriter::addRecord(NamedInstrProfRecord &&I, uint64_t Weight,
                                function_ref<void(Error)> Warn) {
  auto Name = I.Name;
  auto Hash = I.Hash;
  addRecord(Name, Hash, std::move(I), Weight, Warn);
}

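// Compute overlap statistics between \c Other and the record already
// stored for the same function name and hash, accumulating the results
// into \c Overlap and \c FuncLevelOverlap.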
void InstrProfWriter::overlapRecord(NamedInstrProfRecord &&Other,
                                    OverlapStats &Overlap,
                                    OverlapStats &FuncLevelOverlap,
                                    const OverlapFuncFilters &FuncFilter) {
  auto Name = Other.Name;
  auto Hash = Other.Hash;
  Other.accumulateCounts(FuncLevelOverlap.Test);
  if (FunctionData.find(Name) == FunctionData.end()) {
    Overlap.addOneUnique(FuncLevelOverlap.Test);
    return;
  }
  if (FuncLevelOverlap.Test.CountSum < 1.0f) {
    Overlap.Overlap.NumEntries += 1;
    return;
  }
  auto &ProfileDataMap = FunctionData[Name];
  bool NewFunc;
  ProfilingData::iterator Where;
  std::tie(Where, NewFunc) =
      ProfileDataMap.insert(std::make_pair(Hash, InstrProfRecord()));
  if (NewFunc) {
    Overlap.addOneMismatch(FuncLevelOverlap.Test);
    return;
  }
  InstrProfRecord &Dest = Where->second;

  uint64_t ValueCutoff = FuncFilter.ValueCutoff;
  if (!FuncFilter.NameFilter.empty() && Name.contains(FuncFilter.NameFilter))
    ValueCutoff = 0;

  Dest.overlap(Other, Overlap, FuncLevelOverlap, ValueCutoff);
}

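// Add a record for the given function name and hash. If a record already
// exists under this (name, hash) pair, merge the incoming counters into it
// with the given \c Weight; otherwise insert the new record, scaling its
// counts by \c Weight.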
void InstrProfWriter::addRecord(StringRef Name, uint64_t Hash,
                                InstrProfRecord &&I, uint64_t Weight,
                                function_ref<void(Error)> Warn) {
  auto &ProfileDataMap = FunctionData[Name];

  bool NewFunc;
  ProfilingData::iterator Where;
  std::tie(Where, NewFunc) =
      ProfileDataMap.insert(std::make_pair(Hash, InstrProfRecord()));
  InstrProfRecord &Dest = Where->second;

  auto MapWarn = [&](instrprof_error E) {
    Warn(make_error<InstrProfError>(E));
  };

  if (NewFunc) {
    // We've never seen a function with this name and hash, add it.
    Dest = std::move(I);
    if (Weight > 1)
      Dest.scale(Weight, 1, MapWarn);
  } else {
    // We're updating a function we've seen before.
    Dest.merge(I, Weight, MapWarn);
  }

  Dest.sortValueData();
}

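// Add a memprof record for the function with the given GUID, merging it
// into any record previously added for the same GUID.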
void InstrProfWriter::addMemProfRecord(
    const Function::GUID Id, const memprof::IndexedMemProfRecord &Record) {
  auto Result = MemProfRecordData.insert({Id, Record});
  // If we inserted a new record then we are done.
  if (Result.second) {
    return;
  }
  memprof::IndexedMemProfRecord &Existing = Result.first->second;
  Existing.merge(Record);
}

bool InstrProfWriter::addMemProfFrame(const memprof::FrameId Id,
                                      const memprof::Frame &Frame,
                                      function_ref<void(Error)> Warn) {
  auto Result = MemProfFrameData.insert({Id, Frame});
  // If a mapping already exists for the current frame id and it does not
  // match the new mapping provided, then warn and bail out. We don't
  // support merging memprof data whose Frame -> Id mapping is inconsistent
  // across profiles.
  if (!Result.second && Result.first->second != Frame) {
    Warn(make_error<InstrProfError>(instrprof_error::malformed,
                                    "frame to id mapping mismatch"));
    return false;
  }
  return true;
}

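// Merge all function records, memprof records, and memprof frames from
// \c IPW into this writer.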
void InstrProfWriter::mergeRecordsFromWriter(InstrProfWriter &&IPW,
                                             function_ref<void(Error)> Warn) {
  for (auto &I : IPW.FunctionData)
    for (auto &Func : I.getValue())
      addRecord(I.getKey(), Func.first, std::move(Func.second), 1, Warn);

  MemProfFrameData.reserve(IPW.MemProfFrameData.size());
  for (auto &I : IPW.MemProfFrameData) {
    // If we weren't able to add the frame mappings then it doesn't make sense
    // to try to merge the records from this profile.
    if (!addMemProfFrame(I.first, I.second, Warn))
      return;
  }

  MemProfRecordData.reserve(IPW.MemProfRecordData.size());
  for (auto &I : IPW.MemProfRecordData) {
    addMemProfRecord(I.first, I.second);
  }
}

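// Return true if the data for this function should be written out. In
// sparse mode, functions whose counters are all zero are elided.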
bool InstrProfWriter::shouldEncodeData(const ProfilingData &PD) {
  if (!Sparse)
    return true;
  for (const auto &Func : PD) {
    const InstrProfRecord &IPR = Func.second;
    if (llvm::any_of(IPR.Counts, [](uint64_t Count) { return Count > 0; }))
      return true;
  }
  return false;
}

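// Copy the profile summary \c PS into the serializable summary data
// structure \c TheSummary.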
static void setSummary(IndexedInstrProf::Summary *TheSummary,
                       ProfileSummary &PS) {
  using namespace IndexedInstrProf;

  const std::vector<ProfileSummaryEntry> &Res = PS.getDetailedSummary();
  TheSummary->NumSummaryFields = Summary::NumKinds;
  TheSummary->NumCutoffEntries = Res.size();
  TheSummary->set(Summary::MaxFunctionCount, PS.getMaxFunctionCount());
  TheSummary->set(Summary::MaxBlockCount, PS.getMaxCount());
  TheSummary->set(Summary::MaxInternalBlockCount, PS.getMaxInternalCount());
  TheSummary->set(Summary::TotalBlockCount, PS.getTotalCount());
  TheSummary->set(Summary::TotalNumBlocks, PS.getNumCounts());
  TheSummary->set(Summary::TotalNumFunctions, PS.getNumFunctions());
  for (unsigned I = 0; I < Res.size(); I++)
    TheSummary->setEntry(I, Res[I]);
}

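// Write the profile in the indexed binary format. The layout produced
// here is:
//   Header (the HashOffset and MemProfOffset fields are back-patched at
//           the end, once their final values are known)
//   Profile summary (plus a CS summary if the profile is context sensitive)
//   On-disk chained hash table of function records
//   MemProf section (only if the profile contains memory profile data)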
Error InstrProfWriter::writeImpl(ProfOStream &OS) {
  using namespace IndexedInstrProf;

  OnDiskChainedHashTableGenerator<InstrProfRecordWriterTrait> Generator;

  InstrProfSummaryBuilder ISB(ProfileSummaryBuilder::DefaultCutoffs);
  InfoObj->SummaryBuilder = &ISB;
  InstrProfSummaryBuilder CSISB(ProfileSummaryBuilder::DefaultCutoffs);
  InfoObj->CSSummaryBuilder = &CSISB;

  // Populate the hash table generator.
  for (const auto &I : FunctionData)
    if (shouldEncodeData(I.getValue()))
      Generator.insert(I.getKey(), &I.getValue());

  // Write the header.
  IndexedInstrProf::Header Header;
  Header.Magic = IndexedInstrProf::Magic;
  Header.Version = IndexedInstrProf::ProfVersion::CurrentVersion;
  if (static_cast<bool>(ProfileKind & InstrProfKind::IRInstrumentation))
    Header.Version |= VARIANT_MASK_IR_PROF;
  if (static_cast<bool>(ProfileKind & InstrProfKind::ContextSensitive))
    Header.Version |= VARIANT_MASK_CSIR_PROF;
  if (static_cast<bool>(ProfileKind &
                        InstrProfKind::FunctionEntryInstrumentation))
    Header.Version |= VARIANT_MASK_INSTR_ENTRY;
  if (static_cast<bool>(ProfileKind & InstrProfKind::SingleByteCoverage))
    Header.Version |= VARIANT_MASK_BYTE_COVERAGE;
  if (static_cast<bool>(ProfileKind & InstrProfKind::FunctionEntryOnly))
    Header.Version |= VARIANT_MASK_FUNCTION_ENTRY_ONLY;
  if (static_cast<bool>(ProfileKind & InstrProfKind::MemProf))
    Header.Version |= VARIANT_MASK_MEMPROF;

  Header.Unused = 0;
  Header.HashType = static_cast<uint64_t>(IndexedInstrProf::HashType);
  Header.HashOffset = 0;
  Header.MemProfOffset = 0;
  int N = sizeof(IndexedInstrProf::Header) / sizeof(uint64_t);

  // Write out all the fields except 'HashOffset' and 'MemProfOffset'. We
  // need to remember the offsets of these fields to allow back-patching
  // later.
  for (int I = 0; I < N - 2; I++)
    OS.write(reinterpret_cast<uint64_t *>(&Header)[I]);

  // Save the location of Header.HashOffset field in \c OS.
  uint64_t HashTableStartFieldOffset = OS.tell();
  // Reserve the space for HashOffset field.
  OS.write(0);

  // Save the location of the MemProf profile data, which is stored as the
  // schema followed by separate on-disk chained hash tables.
  uint64_t MemProfSectionOffset = OS.tell();
  // Reserve space for the MemProf table field to be patched later if this
  // profile contains memory profile information.
  OS.write(0);

  // Reserve space to write profile summary data.
  uint32_t NumEntries = ProfileSummaryBuilder::DefaultCutoffs.size();
  uint32_t SummarySize = Summary::getSize(Summary::NumKinds, NumEntries);
  // Remember the summary offset.
  uint64_t SummaryOffset = OS.tell();
  for (unsigned I = 0; I < SummarySize / sizeof(uint64_t); I++)
    OS.write(0);
  uint64_t CSSummaryOffset = 0;
  uint64_t CSSummarySize = 0;
  if (static_cast<bool>(ProfileKind & InstrProfKind::ContextSensitive)) {
    CSSummaryOffset = OS.tell();
    CSSummarySize = SummarySize / sizeof(uint64_t);
    for (unsigned I = 0; I < CSSummarySize; I++)
      OS.write(0);
  }

  // Write the hash table.
  uint64_t HashTableStart = Generator.Emit(OS.OS, *InfoObj);

  // Write the MemProf profile data if we have it. This includes a simple
  // schema with the format described below, followed by the hash tables:
  // uint64_t RecordTableOffset = RecordTableGenerator.Emit
  // uint64_t FramePayloadOffset = Stream offset before emitting the frame table
  // uint64_t FrameTableOffset = FrameTableGenerator.Emit
  // uint64_t Num schema entries
  // uint64_t Schema entry 0
  // uint64_t Schema entry 1
  // ....
  // uint64_t Schema entry N - 1
  // OnDiskChainedHashTable MemProfRecordData
  // OnDiskChainedHashTable MemProfFrameData
  uint64_t MemProfSectionStart = 0;
  if (static_cast<bool>(ProfileKind & InstrProfKind::MemProf)) {
    MemProfSectionStart = OS.tell();
    OS.write(0ULL); // Reserve space for the memprof record table offset.
    OS.write(0ULL); // Reserve space for the memprof frame payload offset.
    OS.write(0ULL); // Reserve space for the memprof frame table offset.

    auto Schema = memprof::PortableMemInfoBlock::getSchema();
    OS.write(static_cast<uint64_t>(Schema.size()));
    for (const auto Id : Schema) {
      OS.write(static_cast<uint64_t>(Id));
    }

    auto RecordWriter = std::make_unique<memprof::RecordWriterTrait>();
    RecordWriter->Schema = &Schema;
    OnDiskChainedHashTableGenerator<memprof::RecordWriterTrait>
        RecordTableGenerator;
    for (auto &I : MemProfRecordData) {
      // Insert the key (func hash) and value (memprof record).
      RecordTableGenerator.insert(I.first, I.second);
    }

    uint64_t RecordTableOffset =
        RecordTableGenerator.Emit(OS.OS, *RecordWriter);

    uint64_t FramePayloadOffset = OS.tell();

    auto FrameWriter = std::make_unique<memprof::FrameWriterTrait>();
    OnDiskChainedHashTableGenerator<memprof::FrameWriterTrait>
        FrameTableGenerator;
    for (auto &I : MemProfFrameData) {
      // Insert the key (frame id) and value (frame contents).
      FrameTableGenerator.insert(I.first, I.second);
    }

    uint64_t FrameTableOffset = FrameTableGenerator.Emit(OS.OS, *FrameWriter);

    PatchItem PatchItems[] = {
        {MemProfSectionStart, &RecordTableOffset, 1},
        {MemProfSectionStart + sizeof(uint64_t), &FramePayloadOffset, 1},
        {MemProfSectionStart + 2 * sizeof(uint64_t), &FrameTableOffset, 1},
    };
    OS.patch(PatchItems, 3);
  }

  // Allocate space for data to be serialized out.
  std::unique_ptr<IndexedInstrProf::Summary> TheSummary =
      IndexedInstrProf::allocSummary(SummarySize);
  // Compute the summary and copy it into the data structure to be
  // serialized out (to disk or buffer).
  std::unique_ptr<ProfileSummary> PS = ISB.getSummary();
  setSummary(TheSummary.get(), *PS);
  InfoObj->SummaryBuilder = nullptr;

  // For Context Sensitive summary.
  std::unique_ptr<IndexedInstrProf::Summary> TheCSSummary = nullptr;
  if (static_cast<bool>(ProfileKind & InstrProfKind::ContextSensitive)) {
    TheCSSummary = IndexedInstrProf::allocSummary(SummarySize);
    std::unique_ptr<ProfileSummary> CSPS = CSISB.getSummary();
    setSummary(TheCSSummary.get(), *CSPS);
  }
  InfoObj->CSSummaryBuilder = nullptr;

  // Now do the final patch:
  PatchItem PatchItems[] = {
      // Patch the Header.HashOffset field.
      {HashTableStartFieldOffset, &HashTableStart, 1},
      // Patch the Header.MemProfOffset (=0 for profiles without MemProf data).
      {MemProfSectionOffset, &MemProfSectionStart, 1},
      // Patch the summary data.
      {SummaryOffset, reinterpret_cast<uint64_t *>(TheSummary.get()),
       (int)(SummarySize / sizeof(uint64_t))},
      {CSSummaryOffset, reinterpret_cast<uint64_t *>(TheCSSummary.get()),
       (int)CSSummarySize}};

  OS.patch(PatchItems, sizeof(PatchItems) / sizeof(*PatchItems));

  for (const auto &I : FunctionData)
    for (const auto &F : I.getValue())
      if (Error E = validateRecord(F.second))
        return E;

  return Error::success();
}

Error InstrProfWriter::write(raw_fd_ostream &OS) {
  // Write the hash table.
  ProfOStream POS(OS);
  return writeImpl(POS);
}

std::unique_ptr<MemoryBuffer> InstrProfWriter::writeBuffer() {
  std::string Data;
  raw_string_ostream OS(Data);
  ProfOStream POS(OS);
  // Write the hash table.
  if (Error E = writeImpl(POS))
    return nullptr;
  // Return this in an aligned memory buffer.
  return MemoryBuffer::getMemBufferCopy(Data);
}

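// Names for each value profiling kind, generated from the VALUE_PROF_KIND
// entries in InstrProfData.inc.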
static const char *ValueProfKindStr[] = {
#define VALUE_PROF_KIND(Enumerator, Value, Descr) #Enumerator,
#include "llvm/ProfileData/InstrProfData.inc"
};

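// Check that the value profile data in \c Func is well formed. For value
// kinds other than indirect call targets, the value zero may be recorded
// at most once per site; a repeated zero indicates a corrupt profile.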
Error InstrProfWriter::validateRecord(const InstrProfRecord &Func) {
  for (uint32_t VK = 0; VK <= IPVK_Last; VK++) {
    uint32_t NS = Func.getNumValueSites(VK);
    if (!NS)
      continue;
    for (uint32_t S = 0; S < NS; S++) {
      uint32_t ND = Func.getNumValueDataForSite(VK, S);
      std::unique_ptr<InstrProfValueData[]> VD = Func.getValueForSite(VK, S);
      bool WasZero = false;
      for (uint32_t I = 0; I < ND; I++)
        if ((VK != IPVK_IndirectCallTarget) && (VD[I].Value == 0)) {
          if (WasZero)
            return make_error<InstrProfError>(instrprof_error::invalid_prof);
          WasZero = true;
        }
    }
  }

  return Error::success();
}

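// Write a single function's profile record in the text format: the function
// name, hash, counters, and any value profile data.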
void InstrProfWriter::writeRecordInText(StringRef Name, uint64_t Hash,
                                        const InstrProfRecord &Func,
                                        InstrProfSymtab &Symtab,
                                        raw_fd_ostream &OS) {
  OS << Name << "\n";
  OS << "# Func Hash:\n" << Hash << "\n";
  OS << "# Num Counters:\n" << Func.Counts.size() << "\n";
  OS << "# Counter Values:\n";
  for (uint64_t Count : Func.Counts)
    OS << Count << "\n";

  uint32_t NumValueKinds = Func.getNumValueKinds();
  if (!NumValueKinds) {
    OS << "\n";
    return;
  }

  OS << "# Num Value Kinds:\n" << Func.getNumValueKinds() << "\n";
  for (uint32_t VK = 0; VK < IPVK_Last + 1; VK++) {
    uint32_t NS = Func.getNumValueSites(VK);
    if (!NS)
      continue;
    OS << "# ValueKind = " << ValueProfKindStr[VK] << ":\n" << VK << "\n";
    OS << "# NumValueSites:\n" << NS << "\n";
    for (uint32_t S = 0; S < NS; S++) {
      uint32_t ND = Func.getNumValueDataForSite(VK, S);
      OS << ND << "\n";
      std::unique_ptr<InstrProfValueData[]> VD = Func.getValueForSite(VK, S);
      for (uint32_t I = 0; I < ND; I++) {
        if (VK == IPVK_IndirectCallTarget)
          OS << Symtab.getFuncNameOrExternalSymbol(VD[I].Value) << ":"
             << VD[I].Count << "\n";
        else
          OS << VD[I].Value << ":" << VD[I].Count << "\n";
      }
    }
  }

  OS << "\n";
}

Error InstrProfWriter::writeText(raw_fd_ostream &OS) {
  // Check CS first since it implies an IR level profile.
  if (static_cast<bool>(ProfileKind & InstrProfKind::ContextSensitive))
    OS << "# CSIR level Instrumentation Flag\n:csir\n";
  else if (static_cast<bool>(ProfileKind & InstrProfKind::IRInstrumentation))
    OS << "# IR level Instrumentation Flag\n:ir\n";

  if (static_cast<bool>(ProfileKind &
                        InstrProfKind::FunctionEntryInstrumentation))
    OS << "# Always instrument the function entry block\n:entry_first\n";
  InstrProfSymtab Symtab;

  using FuncPair = detail::DenseMapPair<uint64_t, InstrProfRecord>;
  using RecordType = std::pair<StringRef, FuncPair>;
  SmallVector<RecordType, 4> OrderedFuncData;

  for (const auto &I : FunctionData) {
    if (shouldEncodeData(I.getValue())) {
      if (Error E = Symtab.addFuncName(I.getKey()))
        return E;
      for (const auto &Func : I.getValue())
        OrderedFuncData.push_back(std::make_pair(I.getKey(), Func));
    }
  }

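  // Sort the records by function name, then by hash, so the text output is
  // deterministic.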
  llvm::sort(OrderedFuncData, [](const RecordType &A, const RecordType &B) {
    return std::tie(A.first, A.second.first) <
           std::tie(B.first, B.second.first);
  });

  for (const auto &record : OrderedFuncData) {
    const StringRef &Name = record.first;
    const FuncPair &Func = record.second;
    writeRecordInText(Name, Func.first, Func.second, Symtab, OS);
  }

  for (const auto &record : OrderedFuncData) {
    const FuncPair &Func = record.second;
    if (Error E = validateRecord(Func.second))
      return E;
  }

  return Error::success();
}