1 //===- RewriteStatepointsForGC.cpp - Make GC relocations explicit ---------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Rewrite call/invoke instructions so as to make potential relocations
10 // performed by the garbage collector explicit in the IR.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #include "llvm/Transforms/Scalar/RewriteStatepointsForGC.h"
15
16 #include "llvm/ADT/ArrayRef.h"
17 #include "llvm/ADT/DenseMap.h"
18 #include "llvm/ADT/DenseSet.h"
19 #include "llvm/ADT/MapVector.h"
20 #include "llvm/ADT/None.h"
21 #include "llvm/ADT/Optional.h"
22 #include "llvm/ADT/STLExtras.h"
23 #include "llvm/ADT/SetVector.h"
24 #include "llvm/ADT/SmallSet.h"
25 #include "llvm/ADT/SmallVector.h"
26 #include "llvm/ADT/StringRef.h"
27 #include "llvm/ADT/iterator_range.h"
28 #include "llvm/Analysis/DomTreeUpdater.h"
29 #include "llvm/Analysis/TargetLibraryInfo.h"
30 #include "llvm/Analysis/TargetTransformInfo.h"
31 #include "llvm/IR/Argument.h"
32 #include "llvm/IR/Attributes.h"
33 #include "llvm/IR/BasicBlock.h"
34 #include "llvm/IR/CallingConv.h"
35 #include "llvm/IR/Constant.h"
36 #include "llvm/IR/Constants.h"
37 #include "llvm/IR/DataLayout.h"
38 #include "llvm/IR/DerivedTypes.h"
39 #include "llvm/IR/Dominators.h"
40 #include "llvm/IR/Function.h"
41 #include "llvm/IR/IRBuilder.h"
42 #include "llvm/IR/InstIterator.h"
43 #include "llvm/IR/InstrTypes.h"
44 #include "llvm/IR/Instruction.h"
45 #include "llvm/IR/Instructions.h"
46 #include "llvm/IR/IntrinsicInst.h"
47 #include "llvm/IR/Intrinsics.h"
48 #include "llvm/IR/LLVMContext.h"
49 #include "llvm/IR/MDBuilder.h"
50 #include "llvm/IR/Metadata.h"
51 #include "llvm/IR/Module.h"
52 #include "llvm/IR/Statepoint.h"
53 #include "llvm/IR/Type.h"
54 #include "llvm/IR/User.h"
55 #include "llvm/IR/Value.h"
56 #include "llvm/IR/ValueHandle.h"
57 #include "llvm/InitializePasses.h"
58 #include "llvm/Pass.h"
59 #include "llvm/Support/Casting.h"
60 #include "llvm/Support/CommandLine.h"
61 #include "llvm/Support/Compiler.h"
62 #include "llvm/Support/Debug.h"
63 #include "llvm/Support/ErrorHandling.h"
64 #include "llvm/Support/raw_ostream.h"
65 #include "llvm/Transforms/Scalar.h"
66 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
67 #include "llvm/Transforms/Utils/Local.h"
68 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
69 #include <algorithm>
70 #include <cassert>
71 #include <cstddef>
72 #include <cstdint>
73 #include <iterator>
74 #include <set>
75 #include <string>
76 #include <utility>
77 #include <vector>
78
79 #define DEBUG_TYPE "rewrite-statepoints-for-gc"
80
81 using namespace llvm;
82
83 // Print the liveset found at the insert location
84 static cl::opt<bool> PrintLiveSet("spp-print-liveset", cl::Hidden,
85 cl::init(false));
86 static cl::opt<bool> PrintLiveSetSize("spp-print-liveset-size", cl::Hidden,
87 cl::init(false));
88
89 // Print out the base pointers for debugging
90 static cl::opt<bool> PrintBasePointers("spp-print-base-pointers", cl::Hidden,
91 cl::init(false));
92
93 // Cost threshold measuring when it is profitable to rematerialize a value instead
94 // of relocating it
95 static cl::opt<unsigned>
96 RematerializationThreshold("spp-rematerialization-threshold", cl::Hidden,
97 cl::init(6));
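// (An illustrative note, not from the original source: with the default of 6,
// a derived pointer whose chain back to its base is a handful of cheap GEPs
// or bitcasts is recomputed after the safepoint rather than spilled and
// relocated.)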
98
99 #ifdef EXPENSIVE_CHECKS
100 static bool ClobberNonLive = true;
101 #else
102 static bool ClobberNonLive = false;
103 #endif
104
105 static cl::opt<bool, true> ClobberNonLiveOverride("rs4gc-clobber-non-live",
106 cl::location(ClobberNonLive),
107 cl::Hidden);
108
109 static cl::opt<bool>
110 AllowStatepointWithNoDeoptInfo("rs4gc-allow-statepoint-with-no-deopt-info",
111 cl::Hidden, cl::init(true));
112
113 /// The IR fed into RewriteStatepointsForGC may have had attributes and
114 /// metadata implying dereferenceability that are no longer valid/correct after
115 /// RewriteStatepointsForGC has run. This is because semantically, after
116 /// RewriteStatepointsForGC runs, all calls to gc.statepoint "free" the entire
117 /// heap. stripNonValidData (conservatively) restores
118 /// correctness by erasing all attributes in the module that externally imply
119 /// dereferenceability. Similar reasoning also applies to the noalias
120 /// attributes and metadata. gc.statepoint can touch the entire heap including
121 /// noalias objects.
122 /// Apart from attributes and metadata, we also remove instructions that imply
123 /// constant physical memory: llvm.invariant.start.
124 static void stripNonValidData(Module &M);
125
126 static bool shouldRewriteStatepointsIn(Function &F);
127
128 PreservedAnalyses RewriteStatepointsForGC::run(Module &M,
129 ModuleAnalysisManager &AM) {
130 bool Changed = false;
131 auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
132 for (Function &F : M) {
133 // Nothing to do for declarations.
134 if (F.isDeclaration() || F.empty())
135 continue;
136
137 // Policy choice says not to rewrite - the most common reason is that we're
138 // compiling code without a GCStrategy.
139 if (!shouldRewriteStatepointsIn(F))
140 continue;
141
142 auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
143 auto &TTI = FAM.getResult<TargetIRAnalysis>(F);
144 auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
145 Changed |= runOnFunction(F, DT, TTI, TLI);
146 }
147 if (!Changed)
148 return PreservedAnalyses::all();
149
150 // stripNonValidData asserts that shouldRewriteStatepointsIn
151 // returns true for at least one function in the module. Since at least
152 // one function changed, we know that the precondition is satisfied.
153 stripNonValidData(M);
154
155 PreservedAnalyses PA;
156 PA.preserve<TargetIRAnalysis>();
157 PA.preserve<TargetLibraryAnalysis>();
158 return PA;
159 }
160
161 namespace {
162
163 class RewriteStatepointsForGCLegacyPass : public ModulePass {
164 RewriteStatepointsForGC Impl;
165
166 public:
167 static char ID; // Pass identification, replacement for typeid
168
169 RewriteStatepointsForGCLegacyPass() : ModulePass(ID), Impl() {
170 initializeRewriteStatepointsForGCLegacyPassPass(
171 *PassRegistry::getPassRegistry());
172 }
173
174 bool runOnModule(Module &M) override {
175 bool Changed = false;
176 for (Function &F : M) {
177 // Nothing to do for declarations.
178 if (F.isDeclaration() || F.empty())
179 continue;
180
181 // Policy choice says not to rewrite - the most common reason is that
182 // we're compiling code without a GCStrategy.
183 if (!shouldRewriteStatepointsIn(F))
184 continue;
185
186 TargetTransformInfo &TTI =
187 getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
188 const TargetLibraryInfo &TLI =
189 getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
190 auto &DT = getAnalysis<DominatorTreeWrapperPass>(F).getDomTree();
191
192 Changed |= Impl.runOnFunction(F, DT, TTI, TLI);
193 }
194
195 if (!Changed)
196 return false;
197
198 // stripNonValidData asserts that shouldRewriteStatepointsIn
199 // returns true for at least one function in the module. Since at least
200 // one function changed, we know that the precondition is satisfied.
201 stripNonValidData(M);
202 return true;
203 }
204
205 void getAnalysisUsage(AnalysisUsage &AU) const override {
206 // We add and rewrite a bunch of instructions, but don't really do much
207 // else. We could in theory preserve a lot more analyses here.
208 AU.addRequired<DominatorTreeWrapperPass>();
209 AU.addRequired<TargetTransformInfoWrapperPass>();
210 AU.addRequired<TargetLibraryInfoWrapperPass>();
211 }
212 };
213
214 } // end anonymous namespace
215
216 char RewriteStatepointsForGCLegacyPass::ID = 0;
217
218 ModulePass *llvm::createRewriteStatepointsForGCLegacyPass() {
219 return new RewriteStatepointsForGCLegacyPass();
220 }
221
222 INITIALIZE_PASS_BEGIN(RewriteStatepointsForGCLegacyPass,
223 "rewrite-statepoints-for-gc",
224 "Make relocations explicit at statepoints", false, false)
225 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
226 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
227 INITIALIZE_PASS_END(RewriteStatepointsForGCLegacyPass,
228 "rewrite-statepoints-for-gc",
229 "Make relocations explicit at statepoints", false, false)
230
231 namespace {
232
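// The four sets below are the usual backward-dataflow liveness sets,
// restricted to GC pointer values. Sketched here for orientation (the actual
// computation is in computeLiveInValues):
//   LiveOut(BB) = union of LiveIn(Succ) for each successor Succ of BB
//   LiveIn(BB)  = LiveSet(BB) union (LiveOut(BB) \ KillSet(BB))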
233 struct GCPtrLivenessData {
234 /// Values defined in this block.
235 MapVector<BasicBlock *, SetVector<Value *>> KillSet;
236
237 /// Values used in this block (and thus live); does not include values
238 /// killed within this block.
239 MapVector<BasicBlock *, SetVector<Value *>> LiveSet;
240
241 /// Values live into this basic block (i.e. used by any
242 /// instruction in this basic block or ones reachable from here)
243 MapVector<BasicBlock *, SetVector<Value *>> LiveIn;
244
245 /// Values live out of this basic block (i.e. live into
246 /// any successor block)
247 MapVector<BasicBlock *, SetVector<Value *>> LiveOut;
248 };
249
250 // The type of the internal cache used inside the findBasePointers family
251 // of functions. From the caller's perspective, this is an opaque type and
252 // should not be inspected.
253 //
254 // In the actual implementation this caches two relations:
255 // - The base relation itself (i.e. this pointer is based on that one)
256 // - The base defining value relation (i.e. before base_phi insertion)
257 // Generally, after the execution of a full findBasePointer call, only the
258 // base relation will remain. Internally, we add a mixture of the two
259 // types, then update all the second type to the first type
260 using DefiningValueMapTy = MapVector<Value *, Value *>;
261 using IsKnownBaseMapTy = MapVector<Value *, bool>;
262 using PointerToBaseTy = MapVector<Value *, Value *>;
263 using StatepointLiveSetTy = SetVector<Value *>;
264 using RematerializedValueMapTy =
265 MapVector<AssertingVH<Instruction>, AssertingVH<Value>>;
266
267 struct PartiallyConstructedSafepointRecord {
268 /// The set of values known to be live across this safepoint
269 StatepointLiveSetTy LiveSet;
270
271 /// The *new* gc.statepoint instruction itself. This produces the token
272 /// that normal path gc.relocates and the gc.result are tied to.
273 GCStatepointInst *StatepointToken;
274
275 /// Instruction to which exceptional gc relocates are attached
276 /// Makes it easier to iterate through them during relocationViaAlloca.
277 Instruction *UnwindToken;
278
279 /// Record live values we rematerialize instead of relocating.
280 /// They are not included in the 'LiveSet' field.
281 /// Maps each rematerialized copy to its original value.
282 RematerializedValueMapTy RematerializedValues;
283 };
284
285 struct RematerizlizationCandidateRecord {
286 // Chain from derived pointer to base.
287 SmallVector<Instruction *, 3> ChainToBase;
288 // Original base.
289 Value *RootOfChain;
290 // Cost of chain.
291 InstructionCost Cost;
292 };
293 using RematCandTy = MapVector<Value *, RematerizlizationCandidateRecord>;
294
295 } // end anonymous namespace
296
297 static ArrayRef<Use> GetDeoptBundleOperands(const CallBase *Call) {
298 Optional<OperandBundleUse> DeoptBundle =
299 Call->getOperandBundle(LLVMContext::OB_deopt);
300
301 if (!DeoptBundle) {
302 assert(AllowStatepointWithNoDeoptInfo &&
303 "Found non-leaf call without deopt info!");
304 return None;
305 }
306
307 return DeoptBundle->Inputs;
308 }
309
310 /// Compute the live-in set for every basic block in the function
311 static void computeLiveInValues(DominatorTree &DT, Function &F,
312 GCPtrLivenessData &Data);
313
314 /// Given results from the dataflow liveness computation, find the set of live
315 /// Values at a particular instruction.
316 static void findLiveSetAtInst(Instruction *inst, GCPtrLivenessData &Data,
317 StatepointLiveSetTy &out);
318
319 // TODO: Once we can get to the GCStrategy, this becomes
320 // Optional<bool> isGCManagedPointer(const Type *Ty) const override {
321
322 static bool isGCPointerType(Type *T) {
323 if (auto *PT = dyn_cast<PointerType>(T))
324 // For the sake of this example GC, we arbitrarily pick addrspace(1) as our
325 // GC managed heap. We know that a pointer into this heap needs to be
326 // updated and that no other pointer does.
327 return PT->getAddressSpace() == 1;
328 return false;
329 }
330
331 // Return true if this type is one which a) is a gc pointer or contains a GC
332 // pointer and b) is of a type this code expects to encounter as a live value.
333 // (The insertion code will assert that a type which matches (a) and not (b)
334 // is not encountered.)
335 static bool isHandledGCPointerType(Type *T) {
336 // We fully support gc pointers
337 if (isGCPointerType(T))
338 return true;
339 // We partially support vectors of gc pointers. The code will assert if it
340 // can't handle something.
341 if (auto VT = dyn_cast<VectorType>(T))
342 if (isGCPointerType(VT->getElementType()))
343 return true;
344 return false;
345 }
346
347 #ifndef NDEBUG
348 /// Returns true if this type contains a gc pointer whether we know how to
349 /// handle that type or not.
350 static bool containsGCPtrType(Type *Ty) {
351 if (isGCPointerType(Ty))
352 return true;
353 if (VectorType *VT = dyn_cast<VectorType>(Ty))
354 return isGCPointerType(VT->getScalarType());
355 if (ArrayType *AT = dyn_cast<ArrayType>(Ty))
356 return containsGCPtrType(AT->getElementType());
357 if (StructType *ST = dyn_cast<StructType>(Ty))
358 return llvm::any_of(ST->elements(), containsGCPtrType);
359 return false;
360 }
361
362 // Returns true if this is a type which a) is a gc pointer or contains a GC
363 // pointer and b) is of a type which the code doesn't expect (i.e. first class
364 // aggregates). Used to trip assertions.
365 static bool isUnhandledGCPointerType(Type *Ty) {
366 return containsGCPtrType(Ty) && !isHandledGCPointerType(Ty);
367 }
368 #endif
369
370 // Return the name of the value suffixed with the provided suffix, or, if the
371 // value doesn't have a name, the specified default name.
372 static std::string suffixed_name_or(Value *V, StringRef Suffix,
373 StringRef DefaultName) {
374 return V->hasName() ? (V->getName() + Suffix).str() : DefaultName.str();
375 }
376
377 // Conservatively identifies any definitions which might be live at the
378 // given instruction. The analysis is performed immediately before the
379 // given instruction. Values defined by that instruction are not considered
380 // live. Values used by that instruction are considered live.
381 static void analyzeParsePointLiveness(
382 DominatorTree &DT, GCPtrLivenessData &OriginalLivenessData, CallBase *Call,
383 PartiallyConstructedSafepointRecord &Result) {
384 StatepointLiveSetTy LiveSet;
385 findLiveSetAtInst(Call, OriginalLivenessData, LiveSet);
386
387 if (PrintLiveSet) {
388 dbgs() << "Live Variables:\n";
389 for (Value *V : LiveSet)
390 dbgs() << " " << V->getName() << " " << *V << "\n";
391 }
392 if (PrintLiveSetSize) {
393 dbgs() << "Safepoint For: " << Call->getCalledOperand()->getName() << "\n";
394 dbgs() << "Number live values: " << LiveSet.size() << "\n";
395 }
396 Result.LiveSet = LiveSet;
397 }
398
399 /// Returns true if V is a known base.
400 static bool isKnownBase(Value *V, const IsKnownBaseMapTy &KnownBases);
401
402 /// Caches the IsKnownBase flag for a value and asserts that it wasn't present
403 /// in the cache before.
404 static void setKnownBase(Value *V, bool IsKnownBase,
405 IsKnownBaseMapTy &KnownBases);
406
407 static Value *findBaseDefiningValue(Value *I, DefiningValueMapTy &Cache,
408 IsKnownBaseMapTy &KnownBases);
409
410 /// Return a base defining value for the given vector instruction 'I'. As an
411 /// optimization, this method will try to determine when the value is known
412 /// to already be a base pointer. If this can be established, the fact is
413 /// recorded in 'KnownBases' via setKnownBase rather than returned
414 /// separately. Note that either a
415 /// vector or a pointer typed value can be returned. For the former, the
416 /// vector returned is a BDV (and possibly a base) of the entire vector 'I'.
417 /// If the latter, the returned pointer is a BDV (or possibly a base) for the
418 /// particular element in 'I'.
419 static Value *findBaseDefiningValueOfVector(Value *I, DefiningValueMapTy &Cache,
420 IsKnownBaseMapTy &KnownBases) {
421 // Each case parallels findBaseDefiningValue below, see that code for
422 // detailed motivation.
423
424 auto Cached = Cache.find(I);
425 if (Cached != Cache.end())
426 return Cached->second;
427
428 if (isa<Argument>(I)) {
429 // An incoming argument to the function is a base pointer
430 Cache[I] = I;
431 setKnownBase(I, /* IsKnownBase */true, KnownBases);
432 return I;
433 }
434
435 if (isa<Constant>(I)) {
436 // Base of constant vector consists only of constant null pointers.
437 // For reasoning see similar case inside 'findBaseDefiningValue' function.
438 auto *CAZ = ConstantAggregateZero::get(I->getType());
439 Cache[I] = CAZ;
440 setKnownBase(CAZ, /* IsKnownBase */true, KnownBases);
441 return CAZ;
442 }
443
444 if (isa<LoadInst>(I)) {
445 Cache[I] = I;
446 setKnownBase(I, /* IsKnownBase */true, KnownBases);
447 return I;
448 }
449
450 if (isa<InsertElementInst>(I)) {
451 // We don't know whether this vector contains entirely base pointers or
452 // not. To be conservatively correct, we treat it as a BDV and will
453 // duplicate code as needed to construct a parallel vector of bases.
454 Cache[I] = I;
455 setKnownBase(I, /* IsKnownBase */false, KnownBases);
456 return I;
457 }
458
459 if (isa<ShuffleVectorInst>(I)) {
460 // We don't know whether this vector contains entirely base pointers or
461 // not. To be conservatively correct, we treat it as a BDV and will
462 // duplicate code as needed to construct a parallel vector of bases.
463 // TODO: There are a number of local optimizations which could be applied here
464 // for particular shufflevector patterns.
465 Cache[I] = I;
466 setKnownBase(I, /* IsKnownBase */false, KnownBases);
467 return I;
468 }
469
470 // The behavior of getelementptr instructions is the same for vector and
471 // non-vector data types.
472 if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
473 auto *BDV =
474 findBaseDefiningValue(GEP->getPointerOperand(), Cache, KnownBases);
475 Cache[GEP] = BDV;
476 return BDV;
477 }
478
479 // The behavior of freeze instructions is the same for vector and
480 // non-vector data types.
481 if (auto *Freeze = dyn_cast<FreezeInst>(I)) {
482 auto *BDV = findBaseDefiningValue(Freeze->getOperand(0), Cache, KnownBases);
483 Cache[Freeze] = BDV;
484 return BDV;
485 }
486
487 // If the pointer comes through a bitcast of a vector of pointers to
488 // a vector of another type of pointer, then look through the bitcast
489 if (auto *BC = dyn_cast<BitCastInst>(I)) {
490 auto *BDV = findBaseDefiningValue(BC->getOperand(0), Cache, KnownBases);
491 Cache[BC] = BDV;
492 return BDV;
493 }
494
495 // We assume that functions in the source language only return base
496 // pointers. This should probably be generalized via attributes to support
497 // both source language and internal functions.
498 if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
499 Cache[I] = I;
500 setKnownBase(I, /* IsKnownBase */true, KnownBases);
501 return I;
502 }
503
504 // A PHI or Select is a base defining value. The outer findBasePointer
505 // algorithm is responsible for constructing a base value for this BDV.
506 assert((isa<SelectInst>(I) || isa<PHINode>(I)) &&
507 "unknown vector instruction - no base found for vector element");
508 Cache[I] = I;
509 setKnownBase(I, /* IsKnownBase */false, KnownBases);
510 return I;
511 }
512
513 /// Helper function for findBasePointer - Will return a value which either a)
514 /// defines the base pointer for the input, b) blocks the simple search
515 /// (i.e. a PHI or Select of two derived pointers), or c) involves a change
516 /// from pointer to vector type or back.
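/// A small illustrative example (hypothetical IR, not from the original
/// comment): given
///   %base    = call i8 addrspace(1)* @alloc()
///   %derived = getelementptr i8, i8 addrspace(1)* %base, i64 16
///   %sel     = select i1 %c, i8 addrspace(1)* %derived,
///                            i8 addrspace(1)* %other
/// the BDV of %derived is %base (case (a)), while the BDV of %sel is %sel
/// itself (case (b)); the select merges two derived pointers and blocks the
/// simple search.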
517 static Value *findBaseDefiningValue(Value *I, DefiningValueMapTy &Cache,
518 IsKnownBaseMapTy &KnownBases) {
519 assert(I->getType()->isPtrOrPtrVectorTy() &&
520 "Illegal to ask for the base pointer of a non-pointer type");
521 auto Cached = Cache.find(I);
522 if (Cached != Cache.end())
523 return Cached->second;
524
525 if (I->getType()->isVectorTy())
526 return findBaseDefiningValueOfVector(I, Cache, KnownBases);
527
528 if (isa<Argument>(I)) {
529 // An incoming argument to the function is a base pointer
530 // We should have never reached here if this argument isn't a gc value
531 Cache[I] = I;
532 setKnownBase(I, /* IsKnownBase */true, KnownBases);
533 return I;
534 }
535
536 if (isa<Constant>(I)) {
537 // We assume that objects with a constant base (e.g. a global) can't move
538 // and don't need to be reported to the collector because they are always
539 // live. Besides global references, all kinds of constants (e.g. undef,
540 // constant expressions, null pointers) can be introduced by the inliner or
541 // the optimizer, especially on dynamically dead paths.
542 // Here we treat all of them as having a single null base. By doing this we
543 // try to avoid problems reporting various conflicts of the form
544 // "phi (const1, const2)" or "phi (const, regular gc ptr)".
545 // See constant.ll file for relevant test cases.
546
547 auto *CPN = ConstantPointerNull::get(cast<PointerType>(I->getType()));
548 Cache[I] = CPN;
549 setKnownBase(CPN, /* IsKnownBase */true, KnownBases);
550 return CPN;
551 }
552
553 // inttoptrs in an integral address space are currently ill-defined. We
554 // treat them as defining base pointers here for consistency with the
555 // constant rule above and because we don't really have a better semantic
556 // to give them. Note that the optimizer is always free to insert undefined
557 // behavior on dynamically dead paths as well.
558 if (isa<IntToPtrInst>(I)) {
559 Cache[I] = I;
560 setKnownBase(I, /* IsKnownBase */true, KnownBases);
561 return I;
562 }
563
564 if (CastInst *CI = dyn_cast<CastInst>(I)) {
565 Value *Def = CI->stripPointerCasts();
566 // If stripping pointer casts changes the address space there is an
567 // addrspacecast in between.
568 assert(cast<PointerType>(Def->getType())->getAddressSpace() ==
569 cast<PointerType>(CI->getType())->getAddressSpace() &&
570 "unsupported addrspacecast");
571 // If we find a cast instruction here, it means we've found a cast which is
572 // not simply a pointer cast (i.e. an inttoptr). We don't know how to
573 // handle int->ptr conversion.
574 assert(!isa<CastInst>(Def) && "shouldn't find another cast here");
575 auto *BDV = findBaseDefiningValue(Def, Cache, KnownBases);
576 Cache[CI] = BDV;
577 return BDV;
578 }
579
580 if (isa<LoadInst>(I)) {
581 // The value loaded is a gc base itself
582 Cache[I] = I;
583 setKnownBase(I, /* IsKnownBase */true, KnownBases);
584 return I;
585 }
586
587 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
588 // The base of this GEP is the base
589 auto *BDV =
590 findBaseDefiningValue(GEP->getPointerOperand(), Cache, KnownBases);
591 Cache[GEP] = BDV;
592 return BDV;
593 }
594
595 if (auto *Freeze = dyn_cast<FreezeInst>(I)) {
596 auto *BDV = findBaseDefiningValue(Freeze->getOperand(0), Cache, KnownBases);
597 Cache[Freeze] = BDV;
598 return BDV;
599 }
600
601 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
602 switch (II->getIntrinsicID()) {
603 default:
604 // fall through to general call handling
605 break;
606 case Intrinsic::experimental_gc_statepoint:
607 llvm_unreachable("statepoints don't produce pointers");
608 case Intrinsic::experimental_gc_relocate:
609 // Rerunning safepoint insertion after safepoints are already
610 // inserted is not supported. It could probably be made to work,
611 // but why are you doing this? There's no good reason.
612 llvm_unreachable("repeat safepoint insertion is not supported");
613 case Intrinsic::gcroot:
614 // Currently, this mechanism hasn't been extended to work with gcroot.
615 // There's no reason it couldn't be, but I haven't thought about the
616 // implications much.
617 llvm_unreachable(
618 "interaction with the gcroot mechanism is not supported");
619 case Intrinsic::experimental_gc_get_pointer_base:
620 auto *BDV = findBaseDefiningValue(II->getOperand(0), Cache, KnownBases);
621 Cache[II] = BDV;
622 return BDV;
623 }
624 }
625 // We assume that functions in the source language only return base
626 // pointers. This should probably be generalized via attributes to support
627 // both source language and internal functions.
628 if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
629 Cache[I] = I;
630 setKnownBase(I, /* IsKnownBase */true, KnownBases);
631 return I;
632 }
633
634 // TODO: I have absolutely no idea how to implement this part yet. It's not
635 // necessarily hard, I just haven't really looked at it yet.
636 assert(!isa<LandingPadInst>(I) && "Landing Pad is unimplemented");
637
638 if (isa<AtomicCmpXchgInst>(I)) {
639 // A CAS is effectively an atomic store and load combined under a
640 // predicate. From the perspective of base pointers, we just treat it
641 // like a load.
642 Cache[I] = I;
643 setKnownBase(I, /* IsKnownBase */true, KnownBases);
644 return I;
645 }
646
647 assert(!isa<AtomicRMWInst>(I) && "Xchg handled above, all others are "
648 "binary ops which don't apply to pointers");
649
650 // The aggregate ops. Aggregates can either be in the heap or on the
651 // stack, but in either case, this is simply a field load. As a result,
652 // this is a defining definition of the base just like a load is.
653 if (isa<ExtractValueInst>(I)) {
654 Cache[I] = I;
655 setKnownBase(I, /* IsKnownBase */true, KnownBases);
656 return I;
657 }
658
659 // We should never see an insertvalue since that would require we be
660 // tracing back a struct value, not a pointer value.
661 assert(!isa<InsertValueInst>(I) &&
662 "Base pointer for a struct is meaningless");
663
664 // This value might have been generated by findBasePointer() called when
665 // substituting gc.get.pointer.base() intrinsic.
666 bool IsKnownBase =
667 isa<Instruction>(I) && cast<Instruction>(I)->getMetadata("is_base_value");
668 setKnownBase(I, /* IsKnownBase */IsKnownBase, KnownBases);
669 Cache[I] = I;
670
671 // An extractelement produces a base result exactly when its input does.
672 // We may need to insert a parallel instruction to extract the appropriate
673 // element out of the base vector corresponding to the input. Given this,
674 // it's analogous to the phi and select case even though it's not a merge.
675 if (isa<ExtractElementInst>(I))
676 // Note: There are a lot of obvious peephole cases here. These are deliberately
677 // handled after the main base pointer inference algorithm to make writing
678 // test cases to exercise that code easier.
679 return I;
680
681 // The last two cases here don't return a base pointer. Instead, they
682 // return a value which dynamically selects from among several base
683 // derived pointers (each with its own base potentially). It's the job of
684 // the caller to resolve these.
685 assert((isa<SelectInst>(I) || isa<PHINode>(I)) &&
686 "missing instruction case in findBaseDefiningValue");
687 return I;
688 }
689
690 /// Returns the base defining value for this value.
691 static Value *findBaseDefiningValueCached(Value *I, DefiningValueMapTy &Cache,
692 IsKnownBaseMapTy &KnownBases) {
693 if (Cache.find(I) == Cache.end()) {
694 auto *BDV = findBaseDefiningValue(I, Cache, KnownBases);
695 Cache[I] = BDV;
696 LLVM_DEBUG(dbgs() << "fBDV-cached: " << I->getName() << " -> "
697 << Cache[I]->getName() << ", is known base = "
698 << KnownBases[I] << "\n");
699 }
700 assert(Cache[I] != nullptr);
701 assert(KnownBases.find(Cache[I]) != KnownBases.end() &&
702 "Cached value must be present in known bases map");
703 return Cache[I];
704 }
705
706 /// Return a base pointer for this value if known. Otherwise, return its
707 /// base defining value.
708 static Value *findBaseOrBDV(Value *I, DefiningValueMapTy &Cache,
709 IsKnownBaseMapTy &KnownBases) {
710 Value *Def = findBaseDefiningValueCached(I, Cache, KnownBases);
711 auto Found = Cache.find(Def);
712 if (Found != Cache.end()) {
713 // Either a base-of relation, or a self reference. Caller must check.
714 return Found->second;
715 }
716 // Only a BDV available
717 return Def;
718 }
719
720 #ifndef NDEBUG
721 /// This value is a base pointer that is not generated by RS4GC, i.e. it already
722 /// exists in the code.
723 static bool isOriginalBaseResult(Value *V) {
724 // no recursion possible
725 return !isa<PHINode>(V) && !isa<SelectInst>(V) &&
726 !isa<ExtractElementInst>(V) && !isa<InsertElementInst>(V) &&
727 !isa<ShuffleVectorInst>(V);
728 }
729 #endif
730
731 static bool isKnownBase(Value *V, const IsKnownBaseMapTy &KnownBases) {
732 auto It = KnownBases.find(V);
733 assert(It != KnownBases.end() && "Value not present in the map");
734 return It->second;
735 }
736
737 static void setKnownBase(Value *V, bool IsKnownBase,
738 IsKnownBaseMapTy &KnownBases) {
739 #ifndef NDEBUG
740 auto It = KnownBases.find(V);
741 if (It != KnownBases.end())
742 assert(It->second == IsKnownBase && "Changing already present value");
743 #endif
744 KnownBases[V] = IsKnownBase;
745 }
746
747 // Returns true if First and Second values are both scalar or both vector.
748 static bool areBothVectorOrScalar(Value *First, Value *Second) {
749 return isa<VectorType>(First->getType()) ==
750 isa<VectorType>(Second->getType());
751 }
752
753 namespace {
754
755 /// Models the state of a single base defining value in the findBasePointer
756 /// algorithm for determining where a new instruction is needed to propagate
757 /// the base of this BDV.
758 class BDVState {
759 public:
760 enum StatusTy {
761 // Starting state of lattice
762 Unknown,
763 // Some specific base value -- does *not* mean that instruction
764 // propagates the base of the object
765 // ex: gep %arg, 16 -> %arg is the base value
766 Base,
767 // Need to insert a node to represent a merge.
768 Conflict
769 };
770
771 BDVState() {
772 llvm_unreachable("missing state in map");
773 }
774
775 explicit BDVState(Value *OriginalValue)
776 : OriginalValue(OriginalValue) {}
777 explicit BDVState(Value *OriginalValue, StatusTy Status, Value *BaseValue = nullptr)
778 : OriginalValue(OriginalValue), Status(Status), BaseValue(BaseValue) {
779 assert(Status != Base || BaseValue);
780 }
781
782 StatusTy getStatus() const { return Status; }
783 Value *getOriginalValue() const { return OriginalValue; }
784 Value *getBaseValue() const { return BaseValue; }
785
786 bool isBase() const { return getStatus() == Base; }
787 bool isUnknown() const { return getStatus() == Unknown; }
788 bool isConflict() const { return getStatus() == Conflict; }
789
790 // Values of type BDVState form a lattice, and this function implements
791 // the meet operation: it combines this state with 'Other' and stores the
792 // result in place.
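// Summary of the meet, as implemented below:
//   meet(Unknown,  X)        = X
//   meet(Conflict, X)        = Conflict
//   meet(Base(b1), Base(b2)) = Base(b1) if b1 == b2, otherwise Conflict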
793 void meet(const BDVState &Other) {
794 auto markConflict = [&]() {
795 Status = BDVState::Conflict;
796 BaseValue = nullptr;
797 };
798 // Conflict is a final state.
799 if (isConflict())
800 return;
801 // if we are not known - just take other state.
802 if (isUnknown()) {
803 Status = Other.getStatus();
804 BaseValue = Other.getBaseValue();
805 return;
806 }
807 // We are base.
808 assert(isBase() && "Unknown state");
809 // If other is unknown - just keep our state.
810 if (Other.isUnknown())
811 return;
812 // If other is conflict - it is a final state.
813 if (Other.isConflict())
814 return markConflict();
815 // Other is base as well.
816 assert(Other.isBase() && "Unknown state");
817 // If bases are different - Conflict.
818 if (getBaseValue() != Other.getBaseValue())
819 return markConflict();
820 // We are identical, do nothing.
821 }
822
823 bool operator==(const BDVState &Other) const {
824 return OriginalValue == Other.OriginalValue && BaseValue == Other.BaseValue &&
825 Status == Other.Status;
826 }
827
828 bool operator!=(const BDVState &other) const { return !(*this == other); }
829
830 LLVM_DUMP_METHOD
831 void dump() const {
832 print(dbgs());
833 dbgs() << '\n';
834 }
835
836 void print(raw_ostream &OS) const {
837 switch (getStatus()) {
838 case Unknown:
839 OS << "U";
840 break;
841 case Base:
842 OS << "B";
843 break;
844 case Conflict:
845 OS << "C";
846 break;
847 }
848 OS << " (base " << getBaseValue() << " - "
849 << (getBaseValue() ? getBaseValue()->getName() : "nullptr") << ")"
850 << " for " << OriginalValue->getName() << ":";
851 }
852
853 private:
854 AssertingVH<Value> OriginalValue; // instruction this state corresponds to
855 StatusTy Status = Unknown;
856 AssertingVH<Value> BaseValue = nullptr; // Non-null only if Status == Base.
857 };
858
859 } // end anonymous namespace
860
861 #ifndef NDEBUG
862 static raw_ostream &operator<<(raw_ostream &OS, const BDVState &State) {
863 State.print(OS);
864 return OS;
865 }
866 #endif
867
868 /// For a given value or instruction, figure out what base ptr it's derived from.
869 /// For gc objects, this is simply itself. On success, returns a value which is
870 /// the base pointer. (This is reliable and can be used for relocation.) On
871 /// failure, returns nullptr.
872 static Value *findBasePointer(Value *I, DefiningValueMapTy &Cache,
873 IsKnownBaseMapTy &KnownBases) {
874 Value *Def = findBaseOrBDV(I, Cache, KnownBases);
875
876 if (isKnownBase(Def, KnownBases) && areBothVectorOrScalar(Def, I))
877 return Def;
878
879 // Here's the rough algorithm:
880 // - For every SSA value, construct a mapping to either an actual base
881 // pointer or a PHI which obscures the base pointer.
882 // - Construct a mapping from PHI to unknown TOP state. Use an
883 // optimistic algorithm to propagate base pointer information. Lattice
884 // looks like:
885 // UNKNOWN
886 // b1 b2 b3 b4
887 // CONFLICT
888 // When algorithm terminates, all PHIs will either have a single concrete
889 // base or be in a conflict state.
890 // - For every conflict, insert a dummy PHI node without arguments. Add
891 // these to the base[Instruction] = BasePtr mapping. For every
892 // non-conflict, add the actual base.
893 // - For every conflict, add arguments for the base[a] of each input
894 //   argument.
895 //
896 // Note: A simpler form of this would be to add the conflict form of all
897 // PHIs without running the optimistic algorithm. This would be
898 // analogous to pessimistic data flow and would likely lead to an
899 // overall worse solution.
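//
// A small illustrative example (hypothetical IR, not from the original
// comment): for
//   %d = phi i8 addrspace(1)* [ %d1, %left ], [ %d2, %right ]
// where %d1 and %d2 derive from distinct bases %b1 and %b2, %d ends in the
// CONFLICT state, so a parallel
//   %d.base = phi i8 addrspace(1)* [ %b1, %left ], [ %b2, %right ]
// is inserted and recorded as the base of %d.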
900
901 #ifndef NDEBUG
902 auto isExpectedBDVType = [](Value *BDV) {
903 return isa<PHINode>(BDV) || isa<SelectInst>(BDV) ||
904 isa<ExtractElementInst>(BDV) || isa<InsertElementInst>(BDV) ||
905 isa<ShuffleVectorInst>(BDV);
906 };
907 #endif
908
909 // Once populated, will contain a mapping from each potentially non-base BDV
910 // to a lattice value (described above) which corresponds to that BDV.
911 // We use the order of insertion (DFS over the def/use graph) to provide a
912 // stable deterministic ordering for visiting DenseMaps (which are unordered)
913 // below. This is important for deterministic compilation.
914 MapVector<Value *, BDVState> States;
915
916 #ifndef NDEBUG
917 auto VerifyStates = [&]() {
918 for (auto &Entry : States) {
919 assert(Entry.first == Entry.second.getOriginalValue());
920 }
921 };
922 #endif
923
924 auto visitBDVOperands = [](Value *BDV, std::function<void (Value*)> F) {
925 if (PHINode *PN = dyn_cast<PHINode>(BDV)) {
926 for (Value *InVal : PN->incoming_values())
927 F(InVal);
928 } else if (SelectInst *SI = dyn_cast<SelectInst>(BDV)) {
929 F(SI->getTrueValue());
930 F(SI->getFalseValue());
931 } else if (auto *EE = dyn_cast<ExtractElementInst>(BDV)) {
932 F(EE->getVectorOperand());
933 } else if (auto *IE = dyn_cast<InsertElementInst>(BDV)) {
934 F(IE->getOperand(0));
935 F(IE->getOperand(1));
936 } else if (auto *SV = dyn_cast<ShuffleVectorInst>(BDV)) {
937 // For a canonical broadcast, ignore the undef argument
938 // (without this, we insert a parallel base shuffle for every broadcast)
939 F(SV->getOperand(0));
940 if (!SV->isZeroEltSplat())
941 F(SV->getOperand(1));
942 } else {
943 llvm_unreachable("unexpected BDV type");
944 }
945 };
946
947
948 // Recursively fill in all base defining values reachable from the initial
949 // one for which we don't already know a definite base value for
950 /* scope */ {
951 SmallVector<Value*, 16> Worklist;
952 Worklist.push_back(Def);
953 States.insert({Def, BDVState(Def)});
954 while (!Worklist.empty()) {
955 Value *Current = Worklist.pop_back_val();
956 assert(!isOriginalBaseResult(Current) && "why did it get added?");
957
958 auto visitIncomingValue = [&](Value *InVal) {
959 Value *Base = findBaseOrBDV(InVal, Cache, KnownBases);
960 if (isKnownBase(Base, KnownBases) && areBothVectorOrScalar(Base, InVal))
961 // Known bases won't need new instructions introduced and can be
962 // ignored safely. However, this can only be done when InVal and Base
963 // are both scalar or both vector. Otherwise, we need to find a
964 // correct BDV for InVal, by creating an entry in the lattice
965 // (States).
966 return;
967 assert(isExpectedBDVType(Base) && "the only non-base values "
968 "we see should be base defining values");
969 if (States.insert(std::make_pair(Base, BDVState(Base))).second)
970 Worklist.push_back(Base);
971 };
972
973 visitBDVOperands(Current, visitIncomingValue);
974 }
975 }
976
977 #ifndef NDEBUG
978 VerifyStates();
979 LLVM_DEBUG(dbgs() << "States after initialization:\n");
980 for (const auto &Pair : States) {
981 LLVM_DEBUG(dbgs() << " " << Pair.second << " for " << *Pair.first << "\n");
982 }
983 #endif
984
985 // Iterate forward through the value graph pruning any node from the state
986 // list where all of the inputs are base pointers. The purpose of this is to
987 // reuse existing values when the derived pointer we were asked to materialize
988 // a base pointer for happens to be a base pointer itself. (Or a sub-graph
989 // feeding it does.)
990 SmallVector<Value *> ToRemove;
991 do {
992 ToRemove.clear();
993 for (auto Pair : States) {
994 Value *BDV = Pair.first;
995 auto canPruneInput = [&](Value *V) {
996 // If the input of the BDV is the BDV itself we can prune it. This is
997 // only possible if the BDV is a PHI node.
998 if (V->stripPointerCasts() == BDV)
999 return true;
1000 Value *VBDV = findBaseOrBDV(V, Cache, KnownBases);
1001 if (V->stripPointerCasts() != VBDV)
1002 return false;
1003 // The assumption is that anything not in the state list
1004 // propagates a base pointer.
1005 return States.count(VBDV) == 0;
1006 };
1007
1008 bool CanPrune = true;
1009 visitBDVOperands(BDV, [&](Value *Op) {
1010 CanPrune = CanPrune && canPruneInput(Op);
1011 });
1012 if (CanPrune)
1013 ToRemove.push_back(BDV);
1014 }
1015 for (Value *V : ToRemove) {
1016 States.erase(V);
1017 // Cache the fact V is its own base for later usage.
1018 Cache[V] = V;
1019 }
1020 } while (!ToRemove.empty());
1021
1022 // Did we manage to prove that Def itself must be a base pointer?
1023 if (!States.count(Def))
1024 return Def;
1025
1026 // Return a phi state for a base defining value. We'll generate a new
1027 // base state for known bases and expect to find a cached state otherwise.
1028 auto GetStateForBDV = [&](Value *BaseValue, Value *Input) {
1029 auto I = States.find(BaseValue);
1030 if (I != States.end())
1031 return I->second;
1032 assert(areBothVectorOrScalar(BaseValue, Input));
1033 return BDVState(BaseValue, BDVState::Base, BaseValue);
1034 };
1035
1036 bool Progress = true;
1037 while (Progress) {
1038 #ifndef NDEBUG
1039 const size_t OldSize = States.size();
1040 #endif
1041 Progress = false;
1042 // We're only changing values in this loop, thus safe to keep iterators.
1043 // Since this is computing a fixed point, the order of visit does not
1044 // affect the result. TODO: We could use a worklist here and make this run
1045 // much faster.
1046 for (auto Pair : States) {
1047 Value *BDV = Pair.first;
1048 // Only values that do not have known bases or those that have differing
1049 // type (scalar versus vector) from a possible known base should be in the
1050 // lattice.
1051 assert((!isKnownBase(BDV, KnownBases) ||
1052 !areBothVectorOrScalar(BDV, Pair.second.getBaseValue())) &&
1053 "why did it get added?");
1054
1055 BDVState NewState(BDV);
1056 visitBDVOperands(BDV, [&](Value *Op) {
1057 Value *BDV = findBaseOrBDV(Op, Cache, KnownBases);
1058 auto OpState = GetStateForBDV(BDV, Op);
1059 NewState.meet(OpState);
1060 });
1061
1062 BDVState OldState = States[BDV];
1063 if (OldState != NewState) {
1064 Progress = true;
1065 States[BDV] = NewState;
1066 }
1067 }
1068
1069 assert(OldSize == States.size() &&
1070 "fixed point shouldn't be adding any new nodes to state");
1071 }
1072
1073 #ifndef NDEBUG
1074 VerifyStates();
1075 LLVM_DEBUG(dbgs() << "States after meet iteration:\n");
1076 for (const auto &Pair : States) {
1077 LLVM_DEBUG(dbgs() << " " << Pair.second << " for " << *Pair.first << "\n");
1078 }
1079 #endif
1080
1081 // Handle all instructions that have a vector BDV, but the instruction itself
1082 // is of scalar type.
1083 for (auto Pair : States) {
1084 Instruction *I = cast<Instruction>(Pair.first);
1085 BDVState State = Pair.second;
1086 auto *BaseValue = State.getBaseValue();
1087 // Only values that do not have known bases or those that have differing
1088 // type (scalar versus vector) from a possible known base should be in the
1089 // lattice.
1090 assert(
1091 (!isKnownBase(I, KnownBases) || !areBothVectorOrScalar(I, BaseValue)) &&
1092 "why did it get added?");
1093 assert(!State.isUnknown() && "Optimistic algorithm didn't complete!");
1094
1095 if (!State.isBase() || !isa<VectorType>(BaseValue->getType()))
1096 continue;
1097 // extractelement instructions are a bit special in that we may need to
1098 // insert an extract even when we know an exact base for the instruction.
1099 // The problem is that we need to convert from a vector base to a scalar
1100 // base for the particular index we're interested in.
1101 if (isa<ExtractElementInst>(I)) {
1102 auto *EE = cast<ExtractElementInst>(I);
1103 // TODO: In many cases, the new instruction is just EE itself. We should
1104 // exploit this, but can't do it here since it would break the invariant
1105 // about the BDV not being known to be a base.
1106 auto *BaseInst = ExtractElementInst::Create(
1107 State.getBaseValue(), EE->getIndexOperand(), "base_ee", EE);
1108 BaseInst->setMetadata("is_base_value", MDNode::get(I->getContext(), {}));
1109 States[I] = BDVState(I, BDVState::Base, BaseInst);
1110 setKnownBase(BaseInst, /* IsKnownBase */true, KnownBases);
1111 } else if (!isa<VectorType>(I->getType())) {
1112 // We need to handle cases that have a vector base but the instruction is
1113 // a scalar type (these could be phis or selects or any instruction that
1114 // are of scalar type, but the base can be a vector type). We
1115 // conservatively set this as conflict. Setting the base value for these
1116 // conflicts is handled in the next loop which traverses States.
1117 States[I] = BDVState(I, BDVState::Conflict);
1118 }
1119 }
1120
1121 #ifndef NDEBUG
1122 VerifyStates();
1123 #endif
1124
1125 // Insert Phis for all conflicts
1126 // TODO: adjust naming patterns to avoid this order of iteration dependency
1127 for (auto Pair : States) {
1128 Instruction *I = cast<Instruction>(Pair.first);
1129 BDVState State = Pair.second;
1130 // Only values that do not have known bases or those that have differing
1131 // type (scalar versus vector) from a possible known base should be in the
1132 // lattice.
1133 assert((!isKnownBase(I, KnownBases) ||
1134 !areBothVectorOrScalar(I, State.getBaseValue())) &&
1135 "why did it get added?");
1136 assert(!State.isUnknown() && "Optimistic algorithm didn't complete!");
1137
1138 // Since we're joining a vector and scalar base, they can never be the
1139 // same. As a result, we should always see insert element having reached
1140 // the conflict state.
1141 assert(!isa<InsertElementInst>(I) || State.isConflict());
1142
1143 if (!State.isConflict())
1144 continue;
1145
1146 auto getMangledName = [](Instruction *I) -> std::string {
1147 if (isa<PHINode>(I)) {
1148 return suffixed_name_or(I, ".base", "base_phi");
1149 } else if (isa<SelectInst>(I)) {
1150 return suffixed_name_or(I, ".base", "base_select");
1151 } else if (isa<ExtractElementInst>(I)) {
1152 return suffixed_name_or(I, ".base", "base_ee");
1153 } else if (isa<InsertElementInst>(I)) {
1154 return suffixed_name_or(I, ".base", "base_ie");
1155 } else {
1156 return suffixed_name_or(I, ".base", "base_sv");
1157 }
1158 };
1159
1160 Instruction *BaseInst = I->clone();
1161 BaseInst->insertBefore(I);
1162 BaseInst->setName(getMangledName(I));
1163 // Add metadata marking this as a base value
1164 BaseInst->setMetadata("is_base_value", MDNode::get(I->getContext(), {}));
1165 States[I] = BDVState(I, BDVState::Conflict, BaseInst);
1166 setKnownBase(BaseInst, /* IsKnownBase */true, KnownBases);
1167 }
1168
1169 #ifndef NDEBUG
1170 VerifyStates();
1171 #endif
1172
1173 // Returns an instruction which produces the base pointer for a given
1174 // instruction. The instruction is assumed to be an input to one of the BDVs
1175 // seen in the inference algorithm above. As such, we must either already
1176 // know its base defining value is a base, or have inserted a new
1177 // instruction to propagate the base of its BDV and have entered that newly
1178 // introduced instruction into the state table. In either case, we are
1179 // assured to be able to determine an instruction which produces its base
1180 // pointer.
1181 auto getBaseForInput = [&](Value *Input, Instruction *InsertPt) {
1182 Value *BDV = findBaseOrBDV(Input, Cache, KnownBases);
1183 Value *Base = nullptr;
1184 if (!States.count(BDV)) {
1185 assert(areBothVectorOrScalar(BDV, Input));
1186 Base = BDV;
1187 } else {
1188 // Either conflict or base.
1189 assert(States.count(BDV));
1190 Base = States[BDV].getBaseValue();
1191 }
1192 assert(Base && "Can't be null");
1193 // The cast is needed since base traversal may strip away bitcasts
1194 if (Base->getType() != Input->getType() && InsertPt)
1195 Base = new BitCastInst(Base, Input->getType(), "cast", InsertPt);
1196 return Base;
1197 };
1198
1199 // Fixup all the inputs of the new PHIs. Visit order needs to be
1200 // deterministic and predictable because we're naming newly created
1201 // instructions.
1202 for (auto Pair : States) {
1203 Instruction *BDV = cast<Instruction>(Pair.first);
1204 BDVState State = Pair.second;
1205
1206 // Only values that do not have known bases or those that have differing
1207 // type (scalar versus vector) from a possible known base should be in the
1208 // lattice.
1209 assert((!isKnownBase(BDV, KnownBases) ||
1210 !areBothVectorOrScalar(BDV, State.getBaseValue())) &&
1211 "why did it get added?");
1212 assert(!State.isUnknown() && "Optimistic algorithm didn't complete!");
1213 if (!State.isConflict())
1214 continue;
1215
1216 if (PHINode *BasePHI = dyn_cast<PHINode>(State.getBaseValue())) {
1217 PHINode *PN = cast<PHINode>(BDV);
1218 const unsigned NumPHIValues = PN->getNumIncomingValues();
1219
1220 // The IR verifier requires phi nodes with multiple entries from the
1221 // same basic block to have the same incoming value for each of those
1222 // entries. Since we're inserting bitcasts in the loop, make sure we
1223 // do so at least once per incoming block.
1224 DenseMap<BasicBlock *, Value*> BlockToValue;
1225 for (unsigned i = 0; i < NumPHIValues; i++) {
1226 Value *InVal = PN->getIncomingValue(i);
1227 BasicBlock *InBB = PN->getIncomingBlock(i);
1228 if (!BlockToValue.count(InBB))
1229 BlockToValue[InBB] = getBaseForInput(InVal, InBB->getTerminator());
1230 else {
1231 #ifndef NDEBUG
1232 Value *OldBase = BlockToValue[InBB];
1233 Value *Base = getBaseForInput(InVal, nullptr);
1234
1235 // We can't use `stripPointerCasts` instead of this function because
1236 // `stripPointerCasts` doesn't handle vectors of pointers.
1237 auto StripBitCasts = [](Value *V) -> Value * {
1238 while (auto *BC = dyn_cast<BitCastInst>(V))
1239 V = BC->getOperand(0);
1240 return V;
1241 };
1242 // In essence this assert states: the only way two values
1243 // incoming from the same basic block may be different is by
1244 // being different bitcasts of the same value. A cleanup
1245 // that remains TODO is changing findBaseOrBDV to return an
1246 // llvm::Value of the correct type (and still remain pure).
1247 // This will remove the need to add bitcasts.
1248 assert(StripBitCasts(Base) == StripBitCasts(OldBase) &&
1249 "findBaseOrBDV should be pure!");
1250 #endif
1251 }
1252 Value *Base = BlockToValue[InBB];
1253 BasePHI->setIncomingValue(i, Base);
1254 }
1255 } else if (SelectInst *BaseSI =
1256 dyn_cast<SelectInst>(State.getBaseValue())) {
1257 SelectInst *SI = cast<SelectInst>(BDV);
1258
1259 // Find the instruction which produces the base for each input.
1260 // We may need to insert a bitcast.
1261 BaseSI->setTrueValue(getBaseForInput(SI->getTrueValue(), BaseSI));
1262 BaseSI->setFalseValue(getBaseForInput(SI->getFalseValue(), BaseSI));
1263 } else if (auto *BaseEE =
1264 dyn_cast<ExtractElementInst>(State.getBaseValue())) {
1265 Value *InVal = cast<ExtractElementInst>(BDV)->getVectorOperand();
1266 // Find the instruction which produces the base for each input. We may
1267 // need to insert a bitcast.
1268 BaseEE->setOperand(0, getBaseForInput(InVal, BaseEE));
1269 } else if (auto *BaseIE = dyn_cast<InsertElementInst>(State.getBaseValue())){
1270 auto *BdvIE = cast<InsertElementInst>(BDV);
1271 auto UpdateOperand = [&](int OperandIdx) {
1272 Value *InVal = BdvIE->getOperand(OperandIdx);
1273 Value *Base = getBaseForInput(InVal, BaseIE);
1274 BaseIE->setOperand(OperandIdx, Base);
1275 };
1276 UpdateOperand(0); // vector operand
1277 UpdateOperand(1); // scalar operand
1278 } else {
1279 auto *BaseSV = cast<ShuffleVectorInst>(State.getBaseValue());
1280 auto *BdvSV = cast<ShuffleVectorInst>(BDV);
1281 auto UpdateOperand = [&](int OperandIdx) {
1282 Value *InVal = BdvSV->getOperand(OperandIdx);
1283 Value *Base = getBaseForInput(InVal, BaseSV);
1284 BaseSV->setOperand(OperandIdx, Base);
1285 };
1286 UpdateOperand(0); // vector operand
1287 if (!BdvSV->isZeroEltSplat())
1288 UpdateOperand(1); // vector operand
1289 else {
1290 // Never read, so just use undef
1291 Value *InVal = BdvSV->getOperand(1);
1292 BaseSV->setOperand(1, UndefValue::get(InVal->getType()));
1293 }
1294 }
1295 }
1296
1297 #ifndef NDEBUG
1298 VerifyStates();
1299 #endif
1300
1301 // Cache all of our results so we can cheaply reuse them
1302 // NOTE: This is actually two caches: one of the base defining value
1303 // relation and one of the base pointer relation! FIXME
1304 for (auto Pair : States) {
1305 auto *BDV = Pair.first;
1306 Value *Base = Pair.second.getBaseValue();
1307 assert(BDV && Base);
1308 // Only values that do not have known bases or those that have differing
1309 // type (scalar versus vector) from a possible known base should be in the
1310 // lattice.
1311 assert(
1312 (!isKnownBase(BDV, KnownBases) || !areBothVectorOrScalar(BDV, Base)) &&
1313 "why did it get added?");
1314
1315 LLVM_DEBUG(
1316 dbgs() << "Updating base value cache"
1317 << " for: " << BDV->getName() << " from: "
1318 << (Cache.count(BDV) ? Cache[BDV]->getName().str() : "none")
1319 << " to: " << Base->getName() << "\n");
1320
1321 Cache[BDV] = Base;
1322 }
1323 assert(Cache.count(Def));
1324 return Cache[Def];
1325 }
1326
1327 // For a set of live pointers (base and/or derived), identify the base
1328 // pointer of the object which they are derived from. This routine will
1329 // mutate the IR graph as needed to make the 'base' pointer live at the
1330 // definition site of 'derived'. This ensures that any use of 'derived' can
1331 // also use 'base'. This may involve the insertion of a number of
1332 // additional PHI nodes.
1333 //
1334 // preconditions: live is a set of pointer type Values
1335 //
1336 // side effects: may insert PHI nodes into the existing CFG, will preserve
1337 // CFG, will not remove or mutate any existing nodes
1338 //
1339 // post condition: PointerToBase contains one (derived, base) pair for every
1340 // pointer in live. Note that derived can be equal to base if the original
1341 // pointer was a base pointer.
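//
// For illustration (hypothetical, not from the original comment): if live is
// { %obj, %derived } with %derived = gep %obj, 16, the routine records
// PointerToBase[%obj] = %obj and PointerToBase[%derived] = %obj.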
1342 static void findBasePointers(const StatepointLiveSetTy &live,
1343 PointerToBaseTy &PointerToBase, DominatorTree *DT,
1344 DefiningValueMapTy &DVCache,
1345 IsKnownBaseMapTy &KnownBases) {
1346 for (Value *ptr : live) {
1347 Value *base = findBasePointer(ptr, DVCache, KnownBases);
1348 assert(base && "failed to find base pointer");
1349 PointerToBase[ptr] = base;
1350 assert((!isa<Instruction>(base) || !isa<Instruction>(ptr) ||
1351 DT->dominates(cast<Instruction>(base)->getParent(),
1352 cast<Instruction>(ptr)->getParent())) &&
1353 "The base we found better dominate the derived pointer");
1354 }
1355 }
1356
1357 /// Find the required base pointers (and adjust the live set) for the given
1358 /// parse point.
1359 static void findBasePointers(DominatorTree &DT, DefiningValueMapTy &DVCache,
1360 CallBase *Call,
1361 PartiallyConstructedSafepointRecord &result,
1362 PointerToBaseTy &PointerToBase,
1363 IsKnownBaseMapTy &KnownBases) {
1364 StatepointLiveSetTy PotentiallyDerivedPointers = result.LiveSet;
1365 // We assume that all pointers passed to deopt are base pointers; as an
1366 // optimization, we can use this to avoid separately materializing the base
1367 // pointer graph. This is only relevant since we're very conservative about
1368 // generating new conflict nodes during base pointer insertion. If we were
1369 // smarter there, this would be irrelevant.
1370 if (auto Opt = Call->getOperandBundle(LLVMContext::OB_deopt))
1371 for (Value *V : Opt->Inputs) {
1372 if (!PotentiallyDerivedPointers.count(V))
1373 continue;
1374 PotentiallyDerivedPointers.remove(V);
1375 PointerToBase[V] = V;
1376 }
1377 findBasePointers(PotentiallyDerivedPointers, PointerToBase, &DT, DVCache,
1378 KnownBases);
1379 }
1380
1381 /// Given an updated version of the dataflow liveness results, update the
1382 /// liveset and base pointer maps for the call site CS.
1383 static void recomputeLiveInValues(GCPtrLivenessData &RevisedLivenessData,
1384 CallBase *Call,
1385 PartiallyConstructedSafepointRecord &result,
1386 PointerToBaseTy &PointerToBase);
1387
1388 static void recomputeLiveInValues(
1389 Function &F, DominatorTree &DT, ArrayRef<CallBase *> toUpdate,
1390 MutableArrayRef<struct PartiallyConstructedSafepointRecord> records,
1391 PointerToBaseTy &PointerToBase) {
1392 // TODO-PERF: reuse the original liveness, then simply run the dataflow
1393 // again. The old values are still live and will help it stabilize quickly.
1394 GCPtrLivenessData RevisedLivenessData;
1395 computeLiveInValues(DT, F, RevisedLivenessData);
1396 for (size_t i = 0; i < records.size(); i++) {
1397 struct PartiallyConstructedSafepointRecord &info = records[i];
1398 recomputeLiveInValues(RevisedLivenessData, toUpdate[i], info,
1399 PointerToBase);
1400 }
1401 }
1402
1403 // When inserting gc.relocate and gc.result calls, we need to ensure there are
1404 // no uses of the original value / return value between the gc.statepoint and
1405 // the gc.relocate / gc.result call. One case which can arise is a phi node at
1406 // the start of one of the successor blocks. We also need to be able to insert the
1407 // gc.relocates only on the path which goes through the statepoint. We might
1408 // need to split an edge to make this possible.
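//
// As a hedged illustration (hypothetical IR, names invented): given
//   invoke void @f() to label %normal unwind label %lpad
// where %normal has a second predecessor, we split that edge so the relocates
// inserted into the new block only execute on the path leaving the statepoint:
//   normal.split:                       ; preds = %invoke.block
//     ; gc.relocates are inserted here
//     br label %normal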
1409 static BasicBlock *
1410 normalizeForInvokeSafepoint(BasicBlock *BB, BasicBlock *InvokeParent,
1411 DominatorTree &DT) {
1412 BasicBlock *Ret = BB;
1413 if (!BB->getUniquePredecessor())
1414 Ret = SplitBlockPredecessors(BB, InvokeParent, "", &DT);
1415
1416 // Now that 'Ret' has a unique predecessor we can safely remove all phi nodes
1417 // from it.
1418 FoldSingleEntryPHINodes(Ret);
1419 assert(!isa<PHINode>(Ret->begin()) &&
1420 "All PHI nodes should have been removed!");
1421
1422 // At this point, we can safely insert a gc.relocate or gc.result as the first
1423 // instruction in Ret if needed.
1424 return Ret;
1425 }
1426
1427 // List of all function attributes which must be stripped when lowering from
1428 // abstract machine model to physical machine model. Essentially, these are
1429 // all the effects a safepoint might have which we ignored in the abstract
1430 // machine model for purposes of optimization. We have to strip these on
1431 // both function declarations and call sites.
1432 static constexpr Attribute::AttrKind FnAttrsToStrip[] =
1433 {Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly,
1434 Attribute::ArgMemOnly, Attribute::InaccessibleMemOnly,
1435 Attribute::InaccessibleMemOrArgMemOnly,
1436 Attribute::NoSync, Attribute::NoFree};
1437
1438 // Create new attribute set containing only attributes which can be transferred
1439 // from original call to the safepoint.
1440 static AttributeList legalizeCallAttributes(LLVMContext &Ctx,
1441 AttributeList OrigAL,
1442 AttributeList StatepointAL) {
1443 if (OrigAL.isEmpty())
1444 return StatepointAL;
1445
1446 // Remove the readonly, readnone, and statepoint function attributes.
1447 AttrBuilder FnAttrs(Ctx, OrigAL.getFnAttrs());
1448 for (auto Attr : FnAttrsToStrip)
1449 FnAttrs.removeAttribute(Attr);
1450
1451 for (Attribute A : OrigAL.getFnAttrs()) {
1452 if (isStatepointDirectiveAttr(A))
1453 FnAttrs.removeAttribute(A);
1454 }
1455
1456 // Just skip parameter and return attributes for now
1457 return StatepointAL.addFnAttributes(Ctx, FnAttrs);
1458 }
1459
1460 /// Helper function to place all gc relocates necessary for the given
1461 /// statepoint.
1462 /// Inputs:
1463 /// liveVariables - list of variables to be relocated.
1464 /// basePtrs - base pointers.
1465 /// statepointToken - statepoint instruction to which relocates should be
1466 /// bound.
1467 /// Builder - LLVM IR builder to be used to construct new calls.
1468 static void CreateGCRelocates(ArrayRef<Value *> LiveVariables,
1469 ArrayRef<Value *> BasePtrs,
1470 Instruction *StatepointToken,
1471 IRBuilder<> &Builder) {
1472 if (LiveVariables.empty())
1473 return;
1474
1475 auto FindIndex = [](ArrayRef<Value *> LiveVec, Value *Val) {
1476 auto ValIt = llvm::find(LiveVec, Val);
1477 assert(ValIt != LiveVec.end() && "Val not found in LiveVec!");
1478 size_t Index = std::distance(LiveVec.begin(), ValIt);
1479 assert(Index < LiveVec.size() && "Bug in std::find?");
1480 return Index;
1481 };
1482 Module *M = StatepointToken->getModule();
1483
1484 // All gc_relocate are generated as i8 addrspace(1)* (or a vector type whose
1485 // element type is i8 addrspace(1)*). We originally generated unique
1486 // declarations for each pointer type, but this proved problematic because
1487 // the intrinsic mangling code is incomplete and fragile. Since we're moving
1488 // towards a single unified pointer type anyways, we can just cast everything
1489 // to an i8* of the right address space. A bitcast is added later to convert
1490 // gc_relocate to the actual value's type.
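// As a sketch of the intended mangling (assuming the usual intrinsic naming
// for an i8 addrspace(1)* overload), the relocates emitted below call through
// a declaration like:
//   declare i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token, i32, i32)
// with the result bitcast back to the original value's type later.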
1491 auto getGCRelocateDecl = [&] (Type *Ty) {
1492 assert(isHandledGCPointerType(Ty));
1493 auto AS = Ty->getScalarType()->getPointerAddressSpace();
1494 Type *NewTy = Type::getInt8PtrTy(M->getContext(), AS);
1495 if (auto *VT = dyn_cast<VectorType>(Ty))
1496 NewTy = FixedVectorType::get(NewTy,
1497 cast<FixedVectorType>(VT)->getNumElements());
1498 return Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_relocate,
1499 {NewTy});
1500 };
1501
1502 // Lazily populated map from input types to the canonicalized form mentioned
1503 // in the comment above. This should probably be cached somewhere more
1504 // broadly.
1505 DenseMap<Type *, Function *> TypeToDeclMap;
1506
1507 for (unsigned i = 0; i < LiveVariables.size(); i++) {
1508 // Generate the gc.relocate call and save the result
1509 Value *BaseIdx = Builder.getInt32(FindIndex(LiveVariables, BasePtrs[i]));
1510 Value *LiveIdx = Builder.getInt32(i);
1511
1512 Type *Ty = LiveVariables[i]->getType();
1513 if (!TypeToDeclMap.count(Ty))
1514 TypeToDeclMap[Ty] = getGCRelocateDecl(Ty);
1515 Function *GCRelocateDecl = TypeToDeclMap[Ty];
1516
1517 // only specify a debug name if we can give a useful one
1518 CallInst *Reloc = Builder.CreateCall(
1519 GCRelocateDecl, {StatepointToken, BaseIdx, LiveIdx},
1520 suffixed_name_or(LiveVariables[i], ".relocated", ""));
1521 // Trick CodeGen into thinking there are lots of free registers at this
1522 // fake call.
1523 Reloc->setCallingConv(CallingConv::Cold);
1524 }
1525 }
1526
1527 namespace {
1528
1529 /// This struct is used to defer RAUWs and `eraseFromParent` s. Using this
1530 /// avoids having to worry about keeping around dangling pointers to Values.
1531 class DeferredReplacement {
1532 AssertingVH<Instruction> Old;
1533 AssertingVH<Instruction> New;
1534 bool IsDeoptimize = false;
1535
1536 DeferredReplacement() = default;
1537
1538 public:
1539 static DeferredReplacement createRAUW(Instruction *Old, Instruction *New) {
1540 assert(Old != New && Old && New &&
1541 "Cannot RAUW equal values or to / from null!");
1542
1543 DeferredReplacement D;
1544 D.Old = Old;
1545 D.New = New;
1546 return D;
1547 }
1548
1549 static DeferredReplacement createDelete(Instruction *ToErase) {
1550 DeferredReplacement D;
1551 D.Old = ToErase;
1552 return D;
1553 }
1554
1555 static DeferredReplacement createDeoptimizeReplacement(Instruction *Old) {
1556 #ifndef NDEBUG
1557 auto *F = cast<CallInst>(Old)->getCalledFunction();
1558 assert(F && F->getIntrinsicID() == Intrinsic::experimental_deoptimize &&
1559 "Only way to construct a deoptimize deferred replacement");
1560 #endif
1561 DeferredReplacement D;
1562 D.Old = Old;
1563 D.IsDeoptimize = true;
1564 return D;
1565 }
1566
1567 /// Does the task represented by this instance.
1568 void doReplacement() {
1569 Instruction *OldI = Old;
1570 Instruction *NewI = New;
1571
1572 assert(OldI != NewI && "Disallowed at construction?!");
1573 assert((!IsDeoptimize || !New) &&
1574 "Deoptimize intrinsics are not replaced!");
1575
1576 Old = nullptr;
1577 New = nullptr;
1578
1579 if (NewI)
1580 OldI->replaceAllUsesWith(NewI);
1581
1582 if (IsDeoptimize) {
1583 // Note: we've inserted instructions, so the call to llvm.deoptimize may
1584 // not necessarily be followed by the matching return.
1585 auto *RI = cast<ReturnInst>(OldI->getParent()->getTerminator());
1586 new UnreachableInst(RI->getContext(), RI);
1587 RI->eraseFromParent();
1588 }
1589
1590 OldI->eraseFromParent();
1591 }
1592 };
1593
1594 } // end anonymous namespace
1595
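// Retrieve the requested "deopt-lowering" from the call site or its callee.
// As a hedged example (attribute-syntax sketch only), a call might carry the
// string function attribute:
//   "deopt-lowering"="live-in"
// to request live-in lowering of deopt operands instead of the default
// "live-through".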
1596 static StringRef getDeoptLowering(CallBase *Call) {
1597 const char *DeoptLowering = "deopt-lowering";
1598 if (Call->hasFnAttr(DeoptLowering)) {
1599 // FIXME: Calls have a *really* confusing interface around attributes
1600 // with values.
1601 const AttributeList &CSAS = Call->getAttributes();
1602 if (CSAS.hasFnAttr(DeoptLowering))
1603 return CSAS.getFnAttr(DeoptLowering).getValueAsString();
1604 Function *F = Call->getCalledFunction();
1605 assert(F && F->hasFnAttribute(DeoptLowering));
1606 return F->getFnAttribute(DeoptLowering).getValueAsString();
1607 }
1608 return "live-through";
1609 }
1610
1611 static void
1612 makeStatepointExplicitImpl(CallBase *Call, /* to replace */
1613 const SmallVectorImpl<Value *> &BasePtrs,
1614 const SmallVectorImpl<Value *> &LiveVariables,
1615 PartiallyConstructedSafepointRecord &Result,
1616 std::vector<DeferredReplacement> &Replacements,
1617 const PointerToBaseTy &PointerToBase) {
1618 assert(BasePtrs.size() == LiveVariables.size());
1619
1620 // Then go ahead and use the builder to actually do the inserts. We insert
1621 // immediately before the previous instruction under the assumption that all
1622 // arguments will be available here. We can't insert afterwards since we may
1623 // be replacing a terminator.
1624 IRBuilder<> Builder(Call);
1625
1626 ArrayRef<Value *> GCArgs(LiveVariables);
1627 uint64_t StatepointID = StatepointDirectives::DefaultStatepointID;
1628 uint32_t NumPatchBytes = 0;
1629 uint32_t Flags = uint32_t(StatepointFlags::None);
1630
1631 SmallVector<Value *, 8> CallArgs(Call->args());
1632 Optional<ArrayRef<Use>> DeoptArgs;
1633 if (auto Bundle = Call->getOperandBundle(LLVMContext::OB_deopt))
1634 DeoptArgs = Bundle->Inputs;
1635 Optional<ArrayRef<Use>> TransitionArgs;
1636 if (auto Bundle = Call->getOperandBundle(LLVMContext::OB_gc_transition)) {
1637 TransitionArgs = Bundle->Inputs;
1638 // TODO: This flag no longer serves a purpose and can be removed later
1639 Flags |= uint32_t(StatepointFlags::GCTransition);
1640 }
1641
1642 // Instead of lowering calls to @llvm.experimental.deoptimize as normal calls
1643 // with a return value, we lower them as never-returning calls to
1644 // __llvm_deoptimize that are followed by unreachable to get better codegen.
1645 bool IsDeoptimize = false;
1646
1647 StatepointDirectives SD =
1648 parseStatepointDirectivesFromAttrs(Call->getAttributes());
1649 if (SD.NumPatchBytes)
1650 NumPatchBytes = *SD.NumPatchBytes;
1651 if (SD.StatepointID)
1652 StatepointID = *SD.StatepointID;
1653
1654 // Pass through the requested lowering if any. The default is live-through.
1655 StringRef DeoptLowering = getDeoptLowering(Call);
1656 if (DeoptLowering.equals("live-in"))
1657 Flags |= uint32_t(StatepointFlags::DeoptLiveIn);
1658 else {
1659 assert(DeoptLowering.equals("live-through") && "Unsupported value!");
1660 }
1661
1662 FunctionCallee CallTarget(Call->getFunctionType(), Call->getCalledOperand());
1663 if (Function *F = dyn_cast<Function>(CallTarget.getCallee())) {
1664 auto IID = F->getIntrinsicID();
1665 if (IID == Intrinsic::experimental_deoptimize) {
1666 // Calls to llvm.experimental.deoptimize are lowered to calls to the
1667 // __llvm_deoptimize symbol. We want to resolve this now, since the
1668 // verifier does not allow taking the address of an intrinsic function.
1669
1670 SmallVector<Type *, 8> DomainTy;
1671 for (Value *Arg : CallArgs)
1672 DomainTy.push_back(Arg->getType());
1673 auto *FTy = FunctionType::get(Type::getVoidTy(F->getContext()), DomainTy,
1674 /* isVarArg = */ false);
1675
1676 // Note: CallTarget can be a bitcast instruction of a symbol if there are
1677 // calls to @llvm.experimental.deoptimize with different argument types in
1678 // the same module. This is fine -- we assume the frontend knew what it
1679 // was doing when generating this kind of IR.
1680 CallTarget = F->getParent()
1681 ->getOrInsertFunction("__llvm_deoptimize", FTy);
1682
1683 IsDeoptimize = true;
1684 } else if (IID == Intrinsic::memcpy_element_unordered_atomic ||
1685 IID == Intrinsic::memmove_element_unordered_atomic) {
1686 // Unordered atomic memcpy and memmove intrinsics which are not explicitly
1687 // marked as "gc-leaf-function" should be lowered in a GC parseable way.
1688 // Specifically, these calls should be lowered to the
1689 // __llvm_{memcpy|memmove}_element_unordered_atomic_safepoint symbols.
1690 // Similarly to __llvm_deoptimize we want to resolve this now, since the
1691 // verifier does not allow taking the address of an intrinsic function.
1692 //
1693 // Moreover we need to shuffle the arguments for the call in order to
1694 // accommodate GC. The underlying source and destination objects might be
1695 // relocated during the copy operation should a GC occur. To relocate the
1696 // derived source and destination pointers the implementation of the
1697 // intrinsic should know the corresponding base pointers.
1698 //
1699 // To make the base pointers available pass them explicitly as arguments:
1700 // memcpy(dest_derived, source_derived, ...) =>
1701 // memcpy(dest_base, dest_offset, source_base, source_offset, ...)
1702 auto &Context = Call->getContext();
1703 auto &DL = Call->getModule()->getDataLayout();
1704 auto GetBaseAndOffset = [&](Value *Derived) {
1705 Value *Base = nullptr;
1706 // Optimizations in unreachable code might substitute the real pointer
1707 // with undef, poison or null-derived constant. Return null base for
1708 // them to be consistent with the handling in the main algorithm in
1709 // findBaseDefiningValue.
1710 if (isa<Constant>(Derived))
1711 Base =
1712 ConstantPointerNull::get(cast<PointerType>(Derived->getType()));
1713 else {
1714 assert(PointerToBase.count(Derived));
1715 Base = PointerToBase.find(Derived)->second;
1716 }
1717 unsigned AddressSpace = Derived->getType()->getPointerAddressSpace();
1718 unsigned IntPtrSize = DL.getPointerSizeInBits(AddressSpace);
1719 Value *Base_int = Builder.CreatePtrToInt(
1720 Base, Type::getIntNTy(Context, IntPtrSize));
1721 Value *Derived_int = Builder.CreatePtrToInt(
1722 Derived, Type::getIntNTy(Context, IntPtrSize));
1723 return std::make_pair(Base, Builder.CreateSub(Derived_int, Base_int));
1724 };
1725
1726 auto *Dest = CallArgs[0];
1727 Value *DestBase, *DestOffset;
1728 std::tie(DestBase, DestOffset) = GetBaseAndOffset(Dest);
1729
1730 auto *Source = CallArgs[1];
1731 Value *SourceBase, *SourceOffset;
1732 std::tie(SourceBase, SourceOffset) = GetBaseAndOffset(Source);
1733
1734 auto *LengthInBytes = CallArgs[2];
1735 auto *ElementSizeCI = cast<ConstantInt>(CallArgs[3]);
1736
1737 CallArgs.clear();
1738 CallArgs.push_back(DestBase);
1739 CallArgs.push_back(DestOffset);
1740 CallArgs.push_back(SourceBase);
1741 CallArgs.push_back(SourceOffset);
1742 CallArgs.push_back(LengthInBytes);
1743
1744 SmallVector<Type *, 8> DomainTy;
1745 for (Value *Arg : CallArgs)
1746 DomainTy.push_back(Arg->getType());
1747 auto *FTy = FunctionType::get(Type::getVoidTy(F->getContext()), DomainTy,
1748 /* isVarArg = */ false);
1749
1750 auto GetFunctionName = [](Intrinsic::ID IID, ConstantInt *ElementSizeCI) {
1751 uint64_t ElementSize = ElementSizeCI->getZExtValue();
1752 if (IID == Intrinsic::memcpy_element_unordered_atomic) {
1753 switch (ElementSize) {
1754 case 1:
1755 return "__llvm_memcpy_element_unordered_atomic_safepoint_1";
1756 case 2:
1757 return "__llvm_memcpy_element_unordered_atomic_safepoint_2";
1758 case 4:
1759 return "__llvm_memcpy_element_unordered_atomic_safepoint_4";
1760 case 8:
1761 return "__llvm_memcpy_element_unordered_atomic_safepoint_8";
1762 case 16:
1763 return "__llvm_memcpy_element_unordered_atomic_safepoint_16";
1764 default:
1765 llvm_unreachable("unexpected element size!");
1766 }
1767 }
1768 assert(IID == Intrinsic::memmove_element_unordered_atomic);
1769 switch (ElementSize) {
1770 case 1:
1771 return "__llvm_memmove_element_unordered_atomic_safepoint_1";
1772 case 2:
1773 return "__llvm_memmove_element_unordered_atomic_safepoint_2";
1774 case 4:
1775 return "__llvm_memmove_element_unordered_atomic_safepoint_4";
1776 case 8:
1777 return "__llvm_memmove_element_unordered_atomic_safepoint_8";
1778 case 16:
1779 return "__llvm_memmove_element_unordered_atomic_safepoint_16";
1780 default:
1781 llvm_unreachable("unexpected element size!");
1782 }
1783 };
1784
1785 CallTarget =
1786 F->getParent()
1787 ->getOrInsertFunction(GetFunctionName(IID, ElementSizeCI), FTy);
1788 }
1789 }
1790
1791 // Create the statepoint given all the arguments
1792 GCStatepointInst *Token = nullptr;
1793 if (auto *CI = dyn_cast<CallInst>(Call)) {
1794 CallInst *SPCall = Builder.CreateGCStatepointCall(
1795 StatepointID, NumPatchBytes, CallTarget, Flags, CallArgs,
1796 TransitionArgs, DeoptArgs, GCArgs, "safepoint_token");
1797
1798 SPCall->setTailCallKind(CI->getTailCallKind());
1799 SPCall->setCallingConv(CI->getCallingConv());
1800
1801 // Currently we will fail on parameter attributes and on certain
1802 // function attributes. If we can handle this set of attributes, we set up
1803 // function attrs directly on the statepoint and return attrs later for the
1804 // gc_result intrinsic.
1805 SPCall->setAttributes(legalizeCallAttributes(
1806 CI->getContext(), CI->getAttributes(), SPCall->getAttributes()));
1807
1808 Token = cast<GCStatepointInst>(SPCall);
1809
1810 // Put the following gc_result and gc_relocate calls immediately after the
1811 // old call (which we're about to delete)
1812 assert(CI->getNextNode() && "Not a terminator, must have next!");
1813 Builder.SetInsertPoint(CI->getNextNode());
1814 Builder.SetCurrentDebugLocation(CI->getNextNode()->getDebugLoc());
1815 } else {
1816 auto *II = cast<InvokeInst>(Call);
1817
1818 // Insert the new invoke into the old block. We'll remove the old one in a
1819 // moment at which point this will become the new terminator for the
1820 // original block.
1821 InvokeInst *SPInvoke = Builder.CreateGCStatepointInvoke(
1822 StatepointID, NumPatchBytes, CallTarget, II->getNormalDest(),
1823 II->getUnwindDest(), Flags, CallArgs, TransitionArgs, DeoptArgs, GCArgs,
1824 "statepoint_token");
1825
1826 SPInvoke->setCallingConv(II->getCallingConv());
1827
1828 // Currently we will fail on parameter attributes and on certain
1829 // function attributes. If we can handle this set of attributes, we set up
1830 // function attrs directly on the statepoint and return attrs later for the
1831 // gc_result intrinsic.
1832 SPInvoke->setAttributes(legalizeCallAttributes(
1833 II->getContext(), II->getAttributes(), SPInvoke->getAttributes()));
1834
1835 Token = cast<GCStatepointInst>(SPInvoke);
1836
1837 // Generate gc relocates in exceptional path
1838 BasicBlock *UnwindBlock = II->getUnwindDest();
1839 assert(!isa<PHINode>(UnwindBlock->begin()) &&
1840 UnwindBlock->getUniquePredecessor() &&
1841 "can't safely insert in this block!");
1842
1843 Builder.SetInsertPoint(&*UnwindBlock->getFirstInsertionPt());
1844 Builder.SetCurrentDebugLocation(II->getDebugLoc());
1845
1846 // Attach exceptional gc relocates to the landingpad.
1847 Instruction *ExceptionalToken = UnwindBlock->getLandingPadInst();
1848 Result.UnwindToken = ExceptionalToken;
1849
1850 CreateGCRelocates(LiveVariables, BasePtrs, ExceptionalToken, Builder);
1851
1852 // Generate gc relocates and returns for normal block
1853 BasicBlock *NormalDest = II->getNormalDest();
1854 assert(!isa<PHINode>(NormalDest->begin()) &&
1855 NormalDest->getUniquePredecessor() &&
1856 "can't safely insert in this block!");
1857
1858 Builder.SetInsertPoint(&*NormalDest->getFirstInsertionPt());
1859
1860 // gc relocates will be generated later as if it were a regular call
1861 // statepoint
1862 }
1863 assert(Token && "Should be set in one of the above branches!");
1864
1865 if (IsDeoptimize) {
1866 // If we're wrapping an @llvm.experimental.deoptimize in a statepoint, we
1867 // transform the tail-call like structure to a call to a void function
1868 // followed by unreachable to get better codegen.
1869 Replacements.push_back(
1870 DeferredReplacement::createDeoptimizeReplacement(Call));
1871 } else {
1872 Token->setName("statepoint_token");
1873 if (!Call->getType()->isVoidTy() && !Call->use_empty()) {
1874 StringRef Name = Call->hasName() ? Call->getName() : "";
1875 CallInst *GCResult = Builder.CreateGCResult(Token, Call->getType(), Name);
1876 GCResult->setAttributes(
1877 AttributeList::get(GCResult->getContext(), AttributeList::ReturnIndex,
1878 Call->getAttributes().getRetAttrs()));
1879
1880 // We cannot RAUW or delete CS.getInstruction() because it could be in the
1881 // live set of some other safepoint, in which case that safepoint's
1882 // PartiallyConstructedSafepointRecord will hold a raw pointer to this
1883 // llvm::Instruction. Instead, we defer the replacement and deletion to
1884 // after the live sets have been made explicit in the IR, and we no longer
1885 // have raw pointers to worry about.
1886 Replacements.emplace_back(
1887 DeferredReplacement::createRAUW(Call, GCResult));
1888 } else {
1889 Replacements.emplace_back(DeferredReplacement::createDelete(Call));
1890 }
1891 }
1892
1893 Result.StatepointToken = Token;
1894
1895 // Second, create a gc.relocate for every live variable
1896 CreateGCRelocates(LiveVariables, BasePtrs, Token, Builder);
1897 }
1898
1899 // Replace an existing gc.statepoint with a new one and a set of gc.relocates
1900 // which make the relocations happening at this safepoint explicit.
1901 //
1902 // WARNING: Does not do any fixup to adjust users of the original live
1903 // values. That's the caller's responsibility.
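//
// As a rough sketch (hypothetical IR, most operands elided), a call like
//   %ret = call i8 addrspace(1)* @foo(i8 addrspace(1)* %obj)
// is replaced by
//   %tok = call token @llvm.experimental.gc.statepoint...(... @foo, ... %obj ...)
//   %obj.relocated = call coldcc i8 addrspace(1)*
//                        @llvm.experimental.gc.relocate.p1i8(token %tok, ...)
//   %ret = call i8 addrspace(1)* @llvm.experimental.gc.result...(token %tok)
// Uses of the original return value are rewired to the gc.result, and uses of
// %obj after the statepoint are fixed up later to use %obj.relocated.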
1904 static void
1905 makeStatepointExplicit(DominatorTree &DT, CallBase *Call,
1906 PartiallyConstructedSafepointRecord &Result,
1907 std::vector<DeferredReplacement> &Replacements,
1908 const PointerToBaseTy &PointerToBase) {
1909 const auto &LiveSet = Result.LiveSet;
1910
1911 // Convert to vector for efficient cross referencing.
1912 SmallVector<Value *, 64> BaseVec, LiveVec;
1913 LiveVec.reserve(LiveSet.size());
1914 BaseVec.reserve(LiveSet.size());
1915 for (Value *L : LiveSet) {
1916 LiveVec.push_back(L);
1917 assert(PointerToBase.count(L));
1918 Value *Base = PointerToBase.find(L)->second;
1919 BaseVec.push_back(Base);
1920 }
1921 assert(LiveVec.size() == BaseVec.size());
1922
1923 // Do the actual rewriting and delete the old statepoint
1924 makeStatepointExplicitImpl(Call, BaseVec, LiveVec, Result, Replacements,
1925 PointerToBase);
1926 }
1927
1928 // Helper function for the relocationViaAlloca.
1929 //
1930 // It receives an iterator over the statepoint gc relocates and emits a store
1931 // to the assigned location (via allocaMap) for each one of them. It adds the
1932 // visited values into the visitedLiveValues set, which we will later use
1933 // for validation checking.
1934 static void
1935 insertRelocationStores(iterator_range<Value::user_iterator> GCRelocs,
1936 DenseMap<Value *, AllocaInst *> &AllocaMap,
1937 DenseSet<Value *> &VisitedLiveValues) {
1938 for (User *U : GCRelocs) {
1939 GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U);
1940 if (!Relocate)
1941 continue;
1942
1943 Value *OriginalValue = Relocate->getDerivedPtr();
1944 assert(AllocaMap.count(OriginalValue));
1945 Value *Alloca = AllocaMap[OriginalValue];
1946
1947 // Emit store into the related alloca
1948 // All gc_relocates are i8 addrspace(1)* typed, and each must be bitcast to
1949 // the correct type according to the alloca.
1950 assert(Relocate->getNextNode() &&
1951 "Should always have one since it's not a terminator");
1952 IRBuilder<> Builder(Relocate->getNextNode());
1953 Value *CastedRelocatedValue =
1954 Builder.CreateBitCast(Relocate,
1955 cast<AllocaInst>(Alloca)->getAllocatedType(),
1956 suffixed_name_or(Relocate, ".casted", ""));
1957
1958 new StoreInst(CastedRelocatedValue, Alloca,
1959 cast<Instruction>(CastedRelocatedValue)->getNextNode());
1960
1961 #ifndef NDEBUG
1962 VisitedLiveValues.insert(OriginalValue);
1963 #endif
1964 }
1965 }
1966
1967 // Helper function for the "relocationViaAlloca". Similar to the
1968 // "insertRelocationStores" but works for rematerialized values.
1969 static void insertRematerializationStores(
1970 const RematerializedValueMapTy &RematerializedValues,
1971 DenseMap<Value *, AllocaInst *> &AllocaMap,
1972 DenseSet<Value *> &VisitedLiveValues) {
1973 for (auto RematerializedValuePair: RematerializedValues) {
1974 Instruction *RematerializedValue = RematerializedValuePair.first;
1975 Value *OriginalValue = RematerializedValuePair.second;
1976
1977 assert(AllocaMap.count(OriginalValue) &&
1978 "Can not find alloca for rematerialized value");
1979 Value *Alloca = AllocaMap[OriginalValue];
1980
1981 new StoreInst(RematerializedValue, Alloca,
1982 RematerializedValue->getNextNode());
1983
1984 #ifndef NDEBUG
1985 VisitedLiveValues.insert(OriginalValue);
1986 #endif
1987 }
1988 }
1989
1990 /// Do all the relocation update via allocas and mem2reg
1991 static void relocationViaAlloca(
1992 Function &F, DominatorTree &DT, ArrayRef<Value *> Live,
1993 ArrayRef<PartiallyConstructedSafepointRecord> Records) {
1994 #ifndef NDEBUG
1995 // record initial number of (static) allocas; we'll check we have the same
1996 // number when we get done.
1997 int InitialAllocaNum = 0;
1998 for (Instruction &I : F.getEntryBlock())
1999 if (isa<AllocaInst>(I))
2000 InitialAllocaNum++;
2001 #endif
2002
2003 // TODO-PERF: change data structures, reserve
2004 DenseMap<Value *, AllocaInst *> AllocaMap;
2005 SmallVector<AllocaInst *, 200> PromotableAllocas;
2006 // Used later to check that we have enough allocas to store all values
2007 std::size_t NumRematerializedValues = 0;
2008 PromotableAllocas.reserve(Live.size());
2009
2010 // Emit alloca for "LiveValue" and record it in "allocaMap" and
2011 // "PromotableAllocas"
2012 const DataLayout &DL = F.getParent()->getDataLayout();
2013 auto emitAllocaFor = [&](Value *LiveValue) {
2014 AllocaInst *Alloca = new AllocaInst(LiveValue->getType(),
2015 DL.getAllocaAddrSpace(), "",
2016 F.getEntryBlock().getFirstNonPHI());
2017 AllocaMap[LiveValue] = Alloca;
2018 PromotableAllocas.push_back(Alloca);
2019 };
2020
2021 // Emit alloca for each live gc pointer
2022 for (Value *V : Live)
2023 emitAllocaFor(V);
2024
2025 // Emit allocas for rematerialized values
2026 for (const auto &Info : Records)
2027 for (auto RematerializedValuePair : Info.RematerializedValues) {
2028 Value *OriginalValue = RematerializedValuePair.second;
2029 if (AllocaMap.count(OriginalValue) != 0)
2030 continue;
2031
2032 emitAllocaFor(OriginalValue);
2033 ++NumRematerializedValues;
2034 }
2035
2036 // The next two loops are part of the same conceptual operation. We need to
2037 // insert a store to the alloca after the original def and at each
2038 // redefinition. We need to insert a load before each use. These are split
2039 // into distinct loops for performance reasons.
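// As an illustration (hypothetical names), for a live pointer %p this
// produces, prior to mem2reg:
//   entry:            %p.alloca = alloca i8 addrspace(1)*
//   after the def:    store i8 addrspace(1)* %p, i8 addrspace(1)** %p.alloca
//   after statepoint: store i8 addrspace(1)* %p.relocated, i8 addrspace(1)** %p.alloca
//   before each use:  %p.reload = load i8 addrspace(1)*, i8 addrspace(1)** %p.alloca
// PromoteMemToReg then rewrites this back into SSA form with the correct
// relocated value flowing to each use.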
2040
2041 // Update gc pointer after each statepoint: either store a relocated value or
2042 // null (if no relocated value was found for this gc pointer and it is not a
2043 // gc_result). This must happen before we update the statepoint with a load of
2044 // the alloca, otherwise we lose the link between the statepoint and the old def.
2045 for (const auto &Info : Records) {
2046 Value *Statepoint = Info.StatepointToken;
2047
2048 // This will be used for consistency check
2049 DenseSet<Value *> VisitedLiveValues;
2050
2051 // Insert stores for normal statepoint gc relocates
2052 insertRelocationStores(Statepoint->users(), AllocaMap, VisitedLiveValues);
2053
2054 // If it was an invoke statepoint,
2055 // we will insert stores for the exceptional path gc relocates.
2056 if (isa<InvokeInst>(Statepoint)) {
2057 insertRelocationStores(Info.UnwindToken->users(), AllocaMap,
2058 VisitedLiveValues);
2059 }
2060
2061 // Do similar thing with rematerialized values
2062 insertRematerializationStores(Info.RematerializedValues, AllocaMap,
2063 VisitedLiveValues);
2064
2065 if (ClobberNonLive) {
2066 // As a debugging aid, pretend that an unrelocated pointer becomes null at
2067 // the gc.statepoint. This will turn some subtle GC problems into
2068 // slightly easier to debug SEGVs. Note that on large IR files with
2069 // lots of gc.statepoints this is extremely costly both memory and time
2070 // wise.
2071 SmallVector<AllocaInst *, 64> ToClobber;
2072 for (auto Pair : AllocaMap) {
2073 Value *Def = Pair.first;
2074 AllocaInst *Alloca = Pair.second;
2075
2076 // This value was relocated
2077 if (VisitedLiveValues.count(Def)) {
2078 continue;
2079 }
2080 ToClobber.push_back(Alloca);
2081 }
2082
2083 auto InsertClobbersAt = [&](Instruction *IP) {
2084 for (auto *AI : ToClobber) {
2085 auto PT = cast<PointerType>(AI->getAllocatedType());
2086 Constant *CPN = ConstantPointerNull::get(PT);
2087 new StoreInst(CPN, AI, IP);
2088 }
2089 };
2090
2091 // Insert the clobbering stores. These may get intermixed with the
2092 // gc.results and gc.relocates, but that's fine.
2093 if (auto II = dyn_cast<InvokeInst>(Statepoint)) {
2094 InsertClobbersAt(&*II->getNormalDest()->getFirstInsertionPt());
2095 InsertClobbersAt(&*II->getUnwindDest()->getFirstInsertionPt());
2096 } else {
2097 InsertClobbersAt(cast<Instruction>(Statepoint)->getNextNode());
2098 }
2099 }
2100 }
2101
2102 // Update uses with loads from the allocas and add a store for the original def.
2103 for (auto Pair : AllocaMap) {
2104 Value *Def = Pair.first;
2105 AllocaInst *Alloca = Pair.second;
2106
2107 // We pre-record the uses of allocas so that we don't have to worry about
2108 // later updates that change the user information.
2109
2110 SmallVector<Instruction *, 20> Uses;
2111 // PERF: trade a linear scan for repeated reallocation
2112 Uses.reserve(Def->getNumUses());
2113 for (User *U : Def->users()) {
2114 if (!isa<ConstantExpr>(U)) {
2115 // If the def has a ConstantExpr use, then the def is either a
2116 // ConstantExpr use itself or null. In either case
2117 // (recursively in the first, directly in the second), the oop
2118 // it is ultimately dependent on is null and this particular
2119 // use does not need to be fixed up.
2120 Uses.push_back(cast<Instruction>(U));
2121 }
2122 }
2123
2124 llvm::sort(Uses);
2125 auto Last = std::unique(Uses.begin(), Uses.end());
2126 Uses.erase(Last, Uses.end());
2127
2128 for (Instruction *Use : Uses) {
2129 if (isa<PHINode>(Use)) {
2130 PHINode *Phi = cast<PHINode>(Use);
2131 for (unsigned i = 0; i < Phi->getNumIncomingValues(); i++) {
2132 if (Def == Phi->getIncomingValue(i)) {
2133 LoadInst *Load =
2134 new LoadInst(Alloca->getAllocatedType(), Alloca, "",
2135 Phi->getIncomingBlock(i)->getTerminator());
2136 Phi->setIncomingValue(i, Load);
2137 }
2138 }
2139 } else {
2140 LoadInst *Load =
2141 new LoadInst(Alloca->getAllocatedType(), Alloca, "", Use);
2142 Use->replaceUsesOfWith(Def, Load);
2143 }
2144 }
2145
2146 // Emit store for the initial gc value. Store must be inserted after load,
2147 // otherwise store will be in alloca's use list and an extra load will be
2148 // inserted before it.
2149 StoreInst *Store = new StoreInst(Def, Alloca, /*volatile*/ false,
2150 DL.getABITypeAlign(Def->getType()));
2151 if (Instruction *Inst = dyn_cast<Instruction>(Def)) {
2152 if (InvokeInst *Invoke = dyn_cast<InvokeInst>(Inst)) {
2153 // InvokeInst is a terminator so the store needs to be inserted into its
2154 // normal destination block.
2155 BasicBlock *NormalDest = Invoke->getNormalDest();
2156 Store->insertBefore(NormalDest->getFirstNonPHI());
2157 } else {
2158 assert(!Inst->isTerminator() &&
2159 "The only terminator that can produce a value is "
2160 "InvokeInst which is handled above.");
2161 Store->insertAfter(Inst);
2162 }
2163 } else {
2164 assert(isa<Argument>(Def));
2165 Store->insertAfter(cast<Instruction>(Alloca));
2166 }
2167 }
2168
2169 assert(PromotableAllocas.size() == Live.size() + NumRematerializedValues &&
2170 "we must have the same allocas with lives");
2171 (void) NumRematerializedValues;
2172 if (!PromotableAllocas.empty()) {
2173 // Apply mem2reg to promote alloca to SSA
2174 PromoteMemToReg(PromotableAllocas, DT);
2175 }
2176
2177 #ifndef NDEBUG
2178 for (auto &I : F.getEntryBlock())
2179 if (isa<AllocaInst>(I))
2180 InitialAllocaNum--;
2181 assert(InitialAllocaNum == 0 && "We must not introduce any extra allocas");
2182 #endif
2183 }
2184
2185 /// Implement a unique function which doesn't require we sort the input
2186 /// vector. Doing so has the effect of changing the output of a couple of
2187 /// tests in ways which make them less useful in testing fused safepoints.
2188 template <typename T> static void unique_unsorted(SmallVectorImpl<T> &Vec) {
2189 SmallSet<T, 8> Seen;
2190 erase_if(Vec, [&](const T &V) { return !Seen.insert(V).second; });
2191 }
2192
2193 /// Insert holders so that each Value is obviously live through the entire
2194 /// lifetime of the call.
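/// As a sketch (the "__tmp_use" name comes from the code below; the operand
/// is hypothetical), the holder for a call safepoint looks like:
///   call void (...) @__tmp_use(i8 addrspace(1)* %deopt.ptr)
/// inserted immediately after the safepoint, purely to keep the listed values
/// live until the real statepoint rewriting is done.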
2195 static void insertUseHolderAfter(CallBase *Call, const ArrayRef<Value *> Values,
2196 SmallVectorImpl<CallInst *> &Holders) {
2197 if (Values.empty())
2198 // No values to hold live, might as well not insert the empty holder
2199 return;
2200
2201 Module *M = Call->getModule();
2202 // Use a dummy vararg function to actually hold the values live
2203 FunctionCallee Func = M->getOrInsertFunction(
2204 "__tmp_use", FunctionType::get(Type::getVoidTy(M->getContext()), true));
2205 if (isa<CallInst>(Call)) {
2206 // For call safepoints insert dummy calls right after safepoint
2207 Holders.push_back(
2208 CallInst::Create(Func, Values, "", &*++Call->getIterator()));
2209 return;
2210 }
2211 // For invoke safepoints insert dummy calls both in the normal and
2212 // exceptional destination blocks
2213 auto *II = cast<InvokeInst>(Call);
2214 Holders.push_back(CallInst::Create(
2215 Func, Values, "", &*II->getNormalDest()->getFirstInsertionPt()));
2216 Holders.push_back(CallInst::Create(
2217 Func, Values, "", &*II->getUnwindDest()->getFirstInsertionPt()));
2218 }
2219
2220 static void findLiveReferences(
2221 Function &F, DominatorTree &DT, ArrayRef<CallBase *> toUpdate,
2222 MutableArrayRef<struct PartiallyConstructedSafepointRecord> records) {
2223 GCPtrLivenessData OriginalLivenessData;
2224 computeLiveInValues(DT, F, OriginalLivenessData);
2225 for (size_t i = 0; i < records.size(); i++) {
2226 struct PartiallyConstructedSafepointRecord &info = records[i];
2227 analyzeParsePointLiveness(DT, OriginalLivenessData, toUpdate[i], info);
2228 }
2229 }
2230
2231 // Helper function for the "rematerializeLiveValues". It walks the use chain
2232 // starting from the "CurrentValue" until it reaches the root of the chain, i.e.
2233 // the base or a value it cannot process. Only "simple" values are processed
2234 // (currently GEPs and casts). The returned root is examined by the
2235 // callers of findRematerializableChainToBasePointer. Fills the "ChainToBase"
2236 // array with all visited values.
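// For example (hypothetical chain), given
//   %g = getelementptr i8, i8 addrspace(1)* %base, i64 8
//   %d = bitcast i8 addrspace(1)* %g to i32 addrspace(1)*
// calling this on %d records [%d, %g] in ChainToBase and returns %base as the
// root of the chain.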
2237 static Value* findRematerializableChainToBasePointer(
2238 SmallVectorImpl<Instruction*> &ChainToBase,
2239 Value *CurrentValue) {
2240 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurrentValue)) {
2241 ChainToBase.push_back(GEP);
2242 return findRematerializableChainToBasePointer(ChainToBase,
2243 GEP->getPointerOperand());
2244 }
2245
2246 if (CastInst *CI = dyn_cast<CastInst>(CurrentValue)) {
2247 if (!CI->isNoopCast(CI->getModule()->getDataLayout()))
2248 return CI;
2249
2250 ChainToBase.push_back(CI);
2251 return findRematerializableChainToBasePointer(ChainToBase,
2252 CI->getOperand(0));
2253 }
2254
2255 // We have reached the root of the chain, which is either equal to the base or
2256 // is the first unsupported value along the use chain.
2257 return CurrentValue;
2258 }
2259
2260 // Helper function for the "rematerializeLiveValues". Compute cost of the use
2261 // chain we are going to rematerialize.
2262 static InstructionCost
2263 chainToBasePointerCost(SmallVectorImpl<Instruction *> &Chain,
2264 TargetTransformInfo &TTI) {
2265 InstructionCost Cost = 0;
2266
2267 for (Instruction *Instr : Chain) {
2268 if (CastInst *CI = dyn_cast<CastInst>(Instr)) {
2269 assert(CI->isNoopCast(CI->getModule()->getDataLayout()) &&
2270 "non noop cast is found during rematerialization");
2271
2272 Type *SrcTy = CI->getOperand(0)->getType();
2273 Cost += TTI.getCastInstrCost(CI->getOpcode(), CI->getType(), SrcTy,
2274 TTI::getCastContextHint(CI),
2275 TargetTransformInfo::TCK_SizeAndLatency, CI);
2276
2277 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Instr)) {
2278 // Cost of the address calculation
2279 Type *ValTy = GEP->getSourceElementType();
2280 Cost += TTI.getAddressComputationCost(ValTy);
2281
2282 // And cost of the GEP itself
2283 // TODO: Use TTI->getGEPCost here (it exists, but appears to be not
2284 // allowed for the external usage)
2285 if (!GEP->hasAllConstantIndices())
2286 Cost += 2;
2287
2288 } else {
2289 llvm_unreachable("unsupported instruction type during rematerialization");
2290 }
2291 }
2292
2293 return Cost;
2294 }
2295
2296 static bool AreEquivalentPhiNodes(PHINode &OrigRootPhi, PHINode &AlternateRootPhi) {
2297 unsigned PhiNum = OrigRootPhi.getNumIncomingValues();
2298 if (PhiNum != AlternateRootPhi.getNumIncomingValues() ||
2299 OrigRootPhi.getParent() != AlternateRootPhi.getParent())
2300 return false;
2301 // Map of incoming values and their corresponding basic blocks of
2302 // OrigRootPhi.
2303 SmallDenseMap<Value *, BasicBlock *, 8> CurrentIncomingValues;
2304 for (unsigned i = 0; i < PhiNum; i++)
2305 CurrentIncomingValues[OrigRootPhi.getIncomingValue(i)] =
2306 OrigRootPhi.getIncomingBlock(i);
2307
2308 // Both current and base PHIs should have same incoming values and
2309 // the same basic blocks corresponding to the incoming values.
2310 for (unsigned i = 0; i < PhiNum; i++) {
2311 auto CIVI =
2312 CurrentIncomingValues.find(AlternateRootPhi.getIncomingValue(i));
2313 if (CIVI == CurrentIncomingValues.end())
2314 return false;
2315 BasicBlock *CurrentIncomingBB = CIVI->second;
2316 if (CurrentIncomingBB != AlternateRootPhi.getIncomingBlock(i))
2317 return false;
2318 }
2319 return true;
2320 }
2321
2322 // Find derived pointers that can be recomputed cheaply enough and fill
2323 // RematerizationCandidates with such candidates.
2324 static void
2325 findRematerializationCandidates(PointerToBaseTy PointerToBase,
2326 RematCandTy &RematerizationCandidates,
2327 TargetTransformInfo &TTI) {
2328 const unsigned int ChainLengthThreshold = 10;
2329
2330 for (auto P2B : PointerToBase) {
2331 auto *Derived = P2B.first;
2332 auto *Base = P2B.second;
2333 // Consider only derived pointers.
2334 if (Derived == Base)
2335 continue;
2336
2337 // For each live pointer find its defining chain.
2338 SmallVector<Instruction *, 3> ChainToBase;
2339 Value *RootOfChain =
2340 findRematerializableChainToBasePointer(ChainToBase, Derived);
2341
2342 // Nothing to do, or chain is too long
2343 if ( ChainToBase.size() == 0 ||
2344 ChainToBase.size() > ChainLengthThreshold)
2345 continue;
2346
2347 // Handle the scenario where the RootOfChain is not equal to the
2348 // Base Value, but they are essentially the same phi values.
2349 if (RootOfChain != PointerToBase[Derived]) {
2350 PHINode *OrigRootPhi = dyn_cast<PHINode>(RootOfChain);
2351 PHINode *AlternateRootPhi = dyn_cast<PHINode>(PointerToBase[Derived]);
2352 if (!OrigRootPhi || !AlternateRootPhi)
2353 continue;
2354 // PHI nodes that have the same incoming values and belong to the same
2355 // basic blocks are essentially the same SSA value. When the original phi
2356 // has incoming values with different base pointers, the original phi is
2357 // marked as conflict, and an additional `AlternateRootPhi` with the same
2358 // incoming values get generated by the findBasePointer function. We need
2359 // to identify the newly generated AlternateRootPhi (.base version of phi)
2360 // and RootOfChain (the original phi node itself) are the same, so that we
2361 // can rematerialize the gep and casts. This is a workaround for the
2362 // deficiency in the findBasePointer algorithm.
2363 if (!AreEquivalentPhiNodes(*OrigRootPhi, *AlternateRootPhi))
2364 continue;
2365 }
2366 // Compute cost of this chain.
2367 InstructionCost Cost = chainToBasePointerCost(ChainToBase, TTI);
2368 // TODO: We can also account for cases when we will be able to remove some
2369 // of the rematerialized values by later optimization passes. I.e if
2370 // we rematerialized several intersecting chains. Or if original values
2371 // don't have any uses besides this statepoint.
2372
2373 // Ok, there is a candidate.
2374 RematerizlizationCandidateRecord Record;
2375 Record.ChainToBase = ChainToBase;
2376 Record.RootOfChain = RootOfChain;
2377 Record.Cost = Cost;
2378 RematerizationCandidates.insert({ Derived, Record });
2379 }
2380 }
2381
2382 // From the statepoint live set pick values that are cheaper to recompute than
2383 // to relocate. Remove these values from the live set, rematerialize them after
2384 // the statepoint and record them in the "Info" structure. Note that similar to
2385 // relocated values we don't do any user adjustments here.
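// As a rough illustration (hypothetical IR), if a live value
//   %derived = getelementptr i8, i8 addrspace(1)* %base, i64 16
// is cheap enough to recompute, we drop %derived from the live set and clone
// the GEP right after the statepoint as %derived.remat; the later relocation
// fixup then points the clone at the relocated %base, so only %base has to be
// relocated across the safepoint.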
2386 static void rematerializeLiveValues(CallBase *Call,
2387 PartiallyConstructedSafepointRecord &Info,
2388 PointerToBaseTy &PointerToBase,
2389 RematCandTy &RematerizationCandidates,
2390 TargetTransformInfo &TTI) {
2391 // Record values we are going to delete from this statepoint live set.
2392 // We cannot do this in the following loop due to iterator invalidation.
2393 SmallVector<Value *, 32> LiveValuesToBeDeleted;
2394
2395 for (Value *LiveValue : Info.LiveSet) {
2396 auto It = RematerizationCandidates.find(LiveValue);
2397 if (It == RematerizationCandidates.end())
2398 continue;
2399
2400 RematerizlizationCandidateRecord &Record = It->second;
2401
2402 InstructionCost Cost = Record.Cost;
2403 // For invokes we need to rematerialize each chain twice - for normal and
2404 // for unwind basic blocks. Model this by multiplying cost by two.
2405 if (isa<InvokeInst>(Call))
2406 Cost *= 2;
2407
2408 // If it's too expensive - skip it.
2409 if (Cost >= RematerializationThreshold)
2410 continue;
2411
2412 // Remove value from the live set
2413 LiveValuesToBeDeleted.push_back(LiveValue);
2414
2415 // Clone instructions and record them inside "Info" structure.
2416
2417 // For each live pointer find its defining chain.
2418 SmallVector<Instruction *, 3> ChainToBase = Record.ChainToBase;
2419 // Walk backwards to visit top-most instructions first.
2420 std::reverse(ChainToBase.begin(), ChainToBase.end());
2421
2422 // Utility function which clones all instructions from "ChainToBase"
2423 // and inserts them before "InsertBefore". Returns rematerialized value
2424 // which should be used after statepoint.
2425 auto rematerializeChain = [&ChainToBase](
2426 Instruction *InsertBefore, Value *RootOfChain, Value *AlternateLiveBase) {
2427 Instruction *LastClonedValue = nullptr;
2428 Instruction *LastValue = nullptr;
2429 for (Instruction *Instr: ChainToBase) {
2430 // Only GEP's and casts are supported as we need to be careful to not
2431 // introduce any new uses of pointers not in the liveset.
2432 // Note that it's fine to introduce new uses of pointers which were
2433 // otherwise not used after this statepoint.
2434 assert(isa<GetElementPtrInst>(Instr) || isa<CastInst>(Instr));
2435
2436 Instruction *ClonedValue = Instr->clone();
2437 ClonedValue->insertBefore(InsertBefore);
2438 ClonedValue->setName(Instr->getName() + ".remat");
2439
2440 // If it is not the first instruction in the chain then it uses the previously
2441 // cloned value. We should update it to use the cloned value.
2442 if (LastClonedValue) {
2443 assert(LastValue);
2444 ClonedValue->replaceUsesOfWith(LastValue, LastClonedValue);
2445 #ifndef NDEBUG
2446 for (auto OpValue : ClonedValue->operand_values()) {
2447 // Assert that cloned instruction does not use any instructions from
2448 // this chain other than LastClonedValue
2449 assert(!is_contained(ChainToBase, OpValue) &&
2450 "incorrect use in rematerialization chain");
2451 // Assert that the cloned instruction does not use the RootOfChain
2452 // or the AlternateLiveBase.
2453 assert(OpValue != RootOfChain && OpValue != AlternateLiveBase);
2454 }
2455 #endif
2456 } else {
2457 // For the first instruction, replace the use of unrelocated base i.e.
2458 // RootOfChain/OrigRootPhi, with the corresponding PHI present in the
2459 // live set. They have been proved to be the same PHI nodes. Note
2460 // that the *only* use of the RootOfChain in the ChainToBase list is
2461 // the first Value in the list.
2462 if (RootOfChain != AlternateLiveBase)
2463 ClonedValue->replaceUsesOfWith(RootOfChain, AlternateLiveBase);
2464 }
2465
2466 LastClonedValue = ClonedValue;
2467 LastValue = Instr;
2468 }
2469 assert(LastClonedValue);
2470 return LastClonedValue;
2471 };
2472
2473 // Different cases for calls and invokes. For invokes we need to clone
2474 // instructions both on normal and unwind path.
2475 if (isa<CallInst>(Call)) {
2476 Instruction *InsertBefore = Call->getNextNode();
2477 assert(InsertBefore);
2478 Instruction *RematerializedValue = rematerializeChain(
2479 InsertBefore, Record.RootOfChain, PointerToBase[LiveValue]);
2480 Info.RematerializedValues[RematerializedValue] = LiveValue;
2481 } else {
2482 auto *Invoke = cast<InvokeInst>(Call);
2483
2484 Instruction *NormalInsertBefore =
2485 &*Invoke->getNormalDest()->getFirstInsertionPt();
2486 Instruction *UnwindInsertBefore =
2487 &*Invoke->getUnwindDest()->getFirstInsertionPt();
2488
2489 Instruction *NormalRematerializedValue = rematerializeChain(
2490 NormalInsertBefore, Record.RootOfChain, PointerToBase[LiveValue]);
2491 Instruction *UnwindRematerializedValue = rematerializeChain(
2492 UnwindInsertBefore, Record.RootOfChain, PointerToBase[LiveValue]);
2493
2494 Info.RematerializedValues[NormalRematerializedValue] = LiveValue;
2495 Info.RematerializedValues[UnwindRematerializedValue] = LiveValue;
2496 }
2497 }
2498
2499 // Remove rematerialized values from the live set
2500 for (auto LiveValue: LiveValuesToBeDeleted) {
2501 Info.LiveSet.remove(LiveValue);
2502 }
2503 }
2504
2505 static bool inlineGetBaseAndOffset(Function &F,
2506 SmallVectorImpl<CallInst *> &Intrinsics,
2507 DefiningValueMapTy &DVCache,
2508 IsKnownBaseMapTy &KnownBases) {
2509 auto &Context = F.getContext();
2510 auto &DL = F.getParent()->getDataLayout();
2511 bool Changed = false;
2512
2513 for (auto *Callsite : Intrinsics)
2514 switch (Callsite->getIntrinsicID()) {
2515 case Intrinsic::experimental_gc_get_pointer_base: {
2516 Changed = true;
2517 Value *Base =
2518 findBasePointer(Callsite->getOperand(0), DVCache, KnownBases);
2519 assert(!DVCache.count(Callsite));
2520 auto *BaseBC = IRBuilder<>(Callsite).CreateBitCast(
2521 Base, Callsite->getType(), suffixed_name_or(Base, ".cast", ""));
2522 if (BaseBC != Base)
2523 DVCache[BaseBC] = Base;
2524 Callsite->replaceAllUsesWith(BaseBC);
2525 if (!BaseBC->hasName())
2526 BaseBC->takeName(Callsite);
2527 Callsite->eraseFromParent();
2528 break;
2529 }
2530 case Intrinsic::experimental_gc_get_pointer_offset: {
2531 Changed = true;
2532 Value *Derived = Callsite->getOperand(0);
2533 Value *Base = findBasePointer(Derived, DVCache, KnownBases);
2534 assert(!DVCache.count(Callsite));
2535 unsigned AddressSpace = Derived->getType()->getPointerAddressSpace();
2536 unsigned IntPtrSize = DL.getPointerSizeInBits(AddressSpace);
2537 IRBuilder<> Builder(Callsite);
2538 Value *BaseInt =
2539 Builder.CreatePtrToInt(Base, Type::getIntNTy(Context, IntPtrSize),
2540 suffixed_name_or(Base, ".int", ""));
2541 Value *DerivedInt =
2542 Builder.CreatePtrToInt(Derived, Type::getIntNTy(Context, IntPtrSize),
2543 suffixed_name_or(Derived, ".int", ""));
2544 Value *Offset = Builder.CreateSub(DerivedInt, BaseInt);
2545 Callsite->replaceAllUsesWith(Offset);
2546 Offset->takeName(Callsite);
2547 Callsite->eraseFromParent();
2548 break;
2549 }
2550 default:
2551 llvm_unreachable("Unknown intrinsic");
2552 }
2553
2554 return Changed;
2555 }
2556
2557 static bool insertParsePoints(Function &F, DominatorTree &DT,
2558 TargetTransformInfo &TTI,
2559 SmallVectorImpl<CallBase *> &ToUpdate,
2560 DefiningValueMapTy &DVCache,
2561 IsKnownBaseMapTy &KnownBases) {
2562 #ifndef NDEBUG
2563 // Validate the input
2564 std::set<CallBase *> Uniqued;
2565 Uniqued.insert(ToUpdate.begin(), ToUpdate.end());
2566 assert(Uniqued.size() == ToUpdate.size() && "no duplicates please!");
2567
2568 for (CallBase *Call : ToUpdate)
2569 assert(Call->getFunction() == &F);
2570 #endif
2571
2572 // When inserting gc.relocates for invokes, we need to be able to insert at
2573 // the top of the successor blocks. See the comment on
2574 // normalizeForInvokeSafepoint for exactly what is needed. Note that this step
2575 // may restructure the CFG.
2576 for (CallBase *Call : ToUpdate) {
2577 auto *II = dyn_cast<InvokeInst>(Call);
2578 if (!II)
2579 continue;
2580 normalizeForInvokeSafepoint(II->getNormalDest(), II->getParent(), DT);
2581 normalizeForInvokeSafepoint(II->getUnwindDest(), II->getParent(), DT);
2582 }
2583
2584 // A list of dummy calls added to the IR to keep various values obviously
2585 // live in the IR. We'll remove all of these when done.
2586 SmallVector<CallInst *, 64> Holders;
2587
2588 // Insert a dummy call with all of the deopt operands we'll need for the
2589 // actual safepoint insertion as arguments. This ensures reference operands
2590 // in the deopt argument list are considered live through the safepoint (and
2591 // thus makes sure they get relocated.)
2592 for (CallBase *Call : ToUpdate) {
2593 SmallVector<Value *, 64> DeoptValues;
2594
2595 for (Value *Arg : GetDeoptBundleOperands(Call)) {
2596 assert(!isUnhandledGCPointerType(Arg->getType()) &&
2597 "support for FCA unimplemented");
2598 if (isHandledGCPointerType(Arg->getType()))
2599 DeoptValues.push_back(Arg);
2600 }
2601
2602 insertUseHolderAfter(Call, DeoptValues, Holders);
2603 }
2604
2605 SmallVector<PartiallyConstructedSafepointRecord, 64> Records(ToUpdate.size());
2606
2607 // A) Identify all gc pointers which are statically live at the given call
2608 // site.
2609 findLiveReferences(F, DT, ToUpdate, Records);
2610
2611 /// Global mapping from live pointers to a base-defining-value.
2612 PointerToBaseTy PointerToBase;
2613
2614 // B) Find the base pointers for each live pointer
2615 for (size_t i = 0; i < Records.size(); i++) {
2616 PartiallyConstructedSafepointRecord &info = Records[i];
2617 findBasePointers(DT, DVCache, ToUpdate[i], info, PointerToBase, KnownBases);
2618 }
2619 if (PrintBasePointers) {
2620 errs() << "Base Pairs (w/o Relocation):\n";
2621 for (auto &Pair : PointerToBase) {
2622 errs() << " derived ";
2623 Pair.first->printAsOperand(errs(), false);
2624 errs() << " base ";
2625 Pair.second->printAsOperand(errs(), false);
2626 errs() << "\n";
2628 }
2629 }
2630
2631 // The base phi insertion logic (for any safepoint) may have inserted new
2632 // instructions which are now live at some safepoint. The simplest such
2633 // example is:
2634 // loop:
2635 // phi a <-- will be a new base_phi here
2636 // safepoint 1 <-- that needs to be live here
2637 // gep a + 1
2638 // safepoint 2
2639 // br loop
2640 // We insert some dummy calls after each safepoint to definitely hold live
2641 // the base pointers which were identified for that safepoint. We'll then
2642 // ask liveness for _every_ base inserted to see what is now live. Then we
2643 // remove the dummy calls.
2644 Holders.reserve(Holders.size() + Records.size());
2645 for (size_t i = 0; i < Records.size(); i++) {
2646 PartiallyConstructedSafepointRecord &Info = Records[i];
2647
2648 SmallVector<Value *, 128> Bases;
2649 for (auto *Derived : Info.LiveSet) {
2650 assert(PointerToBase.count(Derived) && "Missed base for derived pointer");
2651 Bases.push_back(PointerToBase[Derived]);
2652 }
2653
2654 insertUseHolderAfter(ToUpdate[i], Bases, Holders);
2655 }
2656
2657 // By selecting base pointers, we've effectively inserted new uses. Thus, we
2658 // need to rerun liveness. We may *also* have inserted new defs, but that's
2659 // not the key issue.
2660 recomputeLiveInValues(F, DT, ToUpdate, Records, PointerToBase);
2661
2662 if (PrintBasePointers) {
2663 errs() << "Base Pairs: (w/Relocation)\n";
2664 for (auto Pair : PointerToBase) {
2665 errs() << " derived ";
2666 Pair.first->printAsOperand(errs(), false);
2667 errs() << " base ";
2668 Pair.second->printAsOperand(errs(), false);
2669 errs() << "\n";
2670 }
2671 }
2672
2673 // It is possible that non-constant live variables have a constant base. For
2674 // example, a GEP with a variable offset from a global. In this case we can
2675 // remove it from the liveset. We already don't add constants to the liveset
2676 // because we assume they won't move at runtime and the GC doesn't need to be
2677 // informed about them. The same reasoning applies if the base is constant.
2678 // Note that the relocation placement code relies on this filtering for
2679 // correctness as it expects the base to be in the liveset, which isn't true
2680 // if the base is constant.
2681 for (auto &Info : Records) {
2682 Info.LiveSet.remove_if([&](Value *LiveV) {
2683 assert(PointerToBase.count(LiveV) && "Missed base for derived pointer");
2684 return isa<Constant>(PointerToBase[LiveV]);
2685 });
2686 }
2687
2688 for (CallInst *CI : Holders)
2689 CI->eraseFromParent();
2690
2691 Holders.clear();
2692
2693 // Compute the cost of possible re-materialization of derived pointers.
2694 RematCandTy RematerizationCandidates;
2695 findRematerializationCandidates(PointerToBase, RematerizationCandidates, TTI);
2696
2697 // In order to reduce the live set of a statepoint we might choose to rematerialize
2698 // some values instead of relocating them. This is purely an optimization and
2699 // does not influence correctness.
2700 for (size_t i = 0; i < Records.size(); i++)
2701 rematerializeLiveValues(ToUpdate[i], Records[i], PointerToBase,
2702 RematerizationCandidates, TTI);
2703
2704 // We need this to safely RAUW and delete call or invoke return values that
2705 // may themselves be live over a statepoint. For details, please see usage in
2706 // makeStatepointExplicitImpl.
2707 std::vector<DeferredReplacement> Replacements;
2708
2709 // Now run through and replace the existing statepoints with new ones with
2710 // the live variables listed. We do not yet update uses of the values being
2711 // relocated. We have references to live variables that need to
2712   // survive to the last iteration of this loop. (By construction, the
2713   // previous statepoint cannot be a live variable, thus we can remove
2714   // the old statepoint calls as we go.)
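  // Roughly (an illustrative sketch, not the exact operand layout), a call
  //   %ret = call i8 addrspace(1)* @foo(i8 addrspace(1)* %obj)
  // is replaced by a statepoint plus projections along the lines of
  //   %tok = call token (...) @llvm.experimental.gc.statepoint.p0(... @foo, i8 addrspace(1)* %obj, ...)
  //   %ret = call i8 addrspace(1)* @llvm.experimental.gc.result(token %tok)
  //   %obj.rel = call i8 addrspace(1)* @llvm.experimental.gc.relocate(token %tok, i32 ..., i32 ...)
  // Uses of %obj/%ret are fixed up later; the DeferredReplacement mechanism
  // declared above exists so the old call can be RAUW'd and deleted safely.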
2715 for (size_t i = 0; i < Records.size(); i++)
2716 makeStatepointExplicit(DT, ToUpdate[i], Records[i], Replacements,
2717 PointerToBase);
2718
2719   ToUpdate.clear(); // prevent accidental use of invalid calls.
2720
2721 for (auto &PR : Replacements)
2722 PR.doReplacement();
2723
2724 Replacements.clear();
2725
2726 for (auto &Info : Records) {
2727     // These live sets may contain stale Value pointers, since we replaced calls
2728 // with operand bundles with calls wrapped in gc.statepoint, and some of
2729 // those calls may have been def'ing live gc pointers. Clear these out to
2730 // avoid accidentally using them.
2731 //
2732 // TODO: We should create a separate data structure that does not contain
2733 // these live sets, and migrate to using that data structure from this point
2734 // onward.
2735 Info.LiveSet.clear();
2736 }
2737 PointerToBase.clear();
2738
2739 // Do all the fixups of the original live variables to their relocated selves
2740 SmallVector<Value *, 128> Live;
2741 for (size_t i = 0; i < Records.size(); i++) {
2742 PartiallyConstructedSafepointRecord &Info = Records[i];
2743
2744 // We can't simply save the live set from the original insertion. One of
2745 // the live values might be the result of a call which needs a safepoint.
2746 // That Value* no longer exists and we need to use the new gc_result.
2747 // Thankfully, the live set is embedded in the statepoint (and updated), so
2748 // we just grab that.
2749 llvm::append_range(Live, Info.StatepointToken->gc_args());
2750 #ifndef NDEBUG
2751 // Do some basic validation checking on our liveness results before
2752     // performing relocation. Relocation can and will turn mistakes in liveness
2753     // results into nonsensical code which is much harder to debug.
2754 // TODO: It would be nice to test consistency as well
2755 assert(DT.isReachableFromEntry(Info.StatepointToken->getParent()) &&
2756 "statepoint must be reachable or liveness is meaningless");
2757 for (Value *V : Info.StatepointToken->gc_args()) {
2758 if (!isa<Instruction>(V))
2759         // Non-instruction values trivially dominate all possible uses
2760 continue;
2761 auto *LiveInst = cast<Instruction>(V);
2762 assert(DT.isReachableFromEntry(LiveInst->getParent()) &&
2763 "unreachable values should never be live");
2764 assert(DT.dominates(LiveInst, Info.StatepointToken) &&
2765 "basic SSA liveness expectation violated by liveness analysis");
2766 }
2767 #endif
2768 }
2769 unique_unsorted(Live);
2770
2771 #ifndef NDEBUG
2772 // Validation check
2773 for (auto *Ptr : Live)
2774 assert(isHandledGCPointerType(Ptr->getType()) &&
2775 "must be a gc pointer type");
2776 #endif
2777
2778 relocationViaAlloca(F, DT, Live, Records);
2779 return !Records.empty();
2780 }
2781
2782 // List of all parameter and return attributes which must be stripped when
2783 // lowering from the abstract machine model. Note that we list attributes
2784 // here which aren't valid as return attributes; that is okay.
2785 static AttributeMask getParamAndReturnAttributesToRemove() {
2786 AttributeMask R;
2787 R.addAttribute(Attribute::Dereferenceable);
2788 R.addAttribute(Attribute::DereferenceableOrNull);
2789 R.addAttribute(Attribute::ReadNone);
2790 R.addAttribute(Attribute::ReadOnly);
2791 R.addAttribute(Attribute::WriteOnly);
2792 R.addAttribute(Attribute::NoAlias);
2793 R.addAttribute(Attribute::NoFree);
2794 return R;
2795 }
2796
2797 static void stripNonValidAttributesFromPrototype(Function &F) {
2798 LLVMContext &Ctx = F.getContext();
2799
2800   // Intrinsics are very delicate. Lowering sometimes depends on the presence
2801   // of certain attributes for correctness, but we may have also inferred
2802   // additional ones in the abstract machine model which need to be stripped.
2803   // This assumes that the attributes defined in Intrinsic.td are conservatively
2804   // correct for both the physical and the abstract machine model.
2805 if (Intrinsic::ID id = F.getIntrinsicID()) {
2806 F.setAttributes(Intrinsic::getAttributes(Ctx, id));
2807 return;
2808 }
2809
2810 AttributeMask R = getParamAndReturnAttributesToRemove();
2811 for (Argument &A : F.args())
2812 if (isa<PointerType>(A.getType()))
2813 F.removeParamAttrs(A.getArgNo(), R);
2814
2815 if (isa<PointerType>(F.getReturnType()))
2816 F.removeRetAttrs(R);
2817
2818 for (auto Attr : FnAttrsToStrip)
2819 F.removeFnAttr(Attr);
2820 }
2821
2822 /// Certain metadata on instructions are invalid after running RS4GC.
2823 /// Optimizations that run after RS4GC can incorrectly use this metadata to
2824 /// optimize functions. We drop such metadata on the instruction.
2825 static void stripInvalidMetadataFromInstruction(Instruction &I) {
2826 if (!isa<LoadInst>(I) && !isa<StoreInst>(I))
2827 return;
2828   // Below is the list of metadata kinds that are still valid on loads and
2829   // stores after RS4GC.
2830   // The metadata implying dereferenceability and noalias is (conservatively)
2831   // dropped. This is because semantically, after RewriteStatepointsForGC runs,
2832   // all calls to gc.statepoint "free" the entire heap. Also, gc.statepoint can
2833   // touch the entire heap including noalias objects. Note: The reasoning is the
2834   // same as for stripping the dereferenceability and noalias attributes that
2835   // are analogous to these metadata counterparts.
2836 // We also drop the invariant.load metadata on the load because that metadata
2837   // implies that the address operand of the load points to memory that is never
2838   // changed once it becomes dereferenceable. This is no longer true after RS4GC.
2839 // Similar reasoning applies to invariant.group metadata, which applies to
2840 // loads within a group.
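  // For example (illustrative only), a load such as
  //   %v = load i8, i8 addrspace(1)* %p, !invariant.load !0
  // must lose !invariant.load here, since after RS4GC every gc.statepoint is
  // treated as potentially clobbering (and relocating) the whole heap.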
2841 unsigned ValidMetadataAfterRS4GC[] = {LLVMContext::MD_tbaa,
2842 LLVMContext::MD_range,
2843 LLVMContext::MD_alias_scope,
2844 LLVMContext::MD_nontemporal,
2845 LLVMContext::MD_nonnull,
2846 LLVMContext::MD_align,
2847 LLVMContext::MD_type};
2848
2849 // Drops all metadata on the instruction other than ValidMetadataAfterRS4GC.
2850 I.dropUnknownNonDebugMetadata(ValidMetadataAfterRS4GC);
2851 }
2852
2853 static void stripNonValidDataFromBody(Function &F) {
2854 if (F.empty())
2855 return;
2856
2857 LLVMContext &Ctx = F.getContext();
2858 MDBuilder Builder(Ctx);
2859
2860   // Set of invariant.start instructions that we need to remove.
2861 // Use this to avoid invalidating the instruction iterator.
2862 SmallVector<IntrinsicInst*, 12> InvariantStartInstructions;
2863
2864 for (Instruction &I : instructions(F)) {
2865     // invariant.start on a memory location implies that the referenced memory
2866     // location is constant and unchanging. This is no longer true after
2867     // RewriteStatepointsForGC runs because there can be calls to gc.statepoint
2868     // which "free" the entire heap, and the presence of invariant.start allows
2869     // the optimizer to sink a load of the memory location past a statepoint,
2870     // which is incorrect.
2871 if (auto *II = dyn_cast<IntrinsicInst>(&I))
2872 if (II->getIntrinsicID() == Intrinsic::invariant_start) {
2873 InvariantStartInstructions.push_back(II);
2874 continue;
2875 }
2876
2877 if (MDNode *Tag = I.getMetadata(LLVMContext::MD_tbaa)) {
2878 MDNode *MutableTBAA = Builder.createMutableTBAAAccessTag(Tag);
2879 I.setMetadata(LLVMContext::MD_tbaa, MutableTBAA);
2880 }
2881
2882 stripInvalidMetadataFromInstruction(I);
2883
2884 AttributeMask R = getParamAndReturnAttributesToRemove();
2885 if (auto *Call = dyn_cast<CallBase>(&I)) {
2886 for (int i = 0, e = Call->arg_size(); i != e; i++)
2887 if (isa<PointerType>(Call->getArgOperand(i)->getType()))
2888 Call->removeParamAttrs(i, R);
2889 if (isa<PointerType>(Call->getType()))
2890 Call->removeRetAttrs(R);
2891 }
2892 }
2893
2894   // Delete the invariant.start instructions and RAUW them with undef.
2895 for (auto *II : InvariantStartInstructions) {
2896 II->replaceAllUsesWith(UndefValue::get(II->getType()));
2897 II->eraseFromParent();
2898 }
2899 }
2900
2901 /// Returns true if this function should be rewritten by this pass. The main
2902 /// point of this function is as an extension point for custom logic.
2903 static bool shouldRewriteStatepointsIn(Function &F) {
2904 // TODO: This should check the GCStrategy
2905 if (F.hasGC()) {
2906 const auto &FunctionGCName = F.getGC();
2907 const StringRef StatepointExampleName("statepoint-example");
2908 const StringRef CoreCLRName("coreclr");
2909 return (StatepointExampleName == FunctionGCName) ||
2910 (CoreCLRName == FunctionGCName);
2911 } else
2912 return false;
2913 }
2914
2915 static void stripNonValidData(Module &M) {
2916 #ifndef NDEBUG
2917 assert(llvm::any_of(M, shouldRewriteStatepointsIn) && "precondition!");
2918 #endif
2919
2920 for (Function &F : M)
2921 stripNonValidAttributesFromPrototype(F);
2922
2923 for (Function &F : M)
2924 stripNonValidDataFromBody(F);
2925 }
2926
2927 bool RewriteStatepointsForGC::runOnFunction(Function &F, DominatorTree &DT,
2928 TargetTransformInfo &TTI,
2929 const TargetLibraryInfo &TLI) {
2930 assert(!F.isDeclaration() && !F.empty() &&
2931 "need function body to rewrite statepoints in");
2932 assert(shouldRewriteStatepointsIn(F) && "mismatch in rewrite decision");
2933
2934 auto NeedsRewrite = [&TLI](Instruction &I) {
2935 if (const auto *Call = dyn_cast<CallBase>(&I)) {
2936 if (isa<GCStatepointInst>(Call))
2937 return false;
2938 if (callsGCLeafFunction(Call, TLI))
2939 return false;
2940
2941 // Normally it's up to the frontend to make sure that non-leaf calls also
2942 // have proper deopt state if it is required. We make an exception for
2943 // element atomic memcpy/memmove intrinsics here. Unlike other intrinsics
2944 // these are non-leaf by default. They might be generated by the optimizer
2945 // which doesn't know how to produce a proper deopt state. So if we see a
2946 // non-leaf memcpy/memmove without deopt state just treat it as a leaf
2947 // copy and don't produce a statepoint.
2948 if (!AllowStatepointWithNoDeoptInfo &&
2949 !Call->getOperandBundle(LLVMContext::OB_deopt)) {
2950 assert((isa<AtomicMemCpyInst>(Call) || isa<AtomicMemMoveInst>(Call)) &&
2951 "Don't expect any other calls here!");
2952 return false;
2953 }
2954 return true;
2955 }
2956 return false;
2957 };
2958
2959 // Delete any unreachable statepoints so that we don't have unrewritten
2960 // statepoints surviving this pass. This makes testing easier and the
2961 // resulting IR less confusing to human readers.
2962 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
2963 bool MadeChange = removeUnreachableBlocks(F, &DTU);
2964 // Flush the Dominator Tree.
2965 DTU.getDomTree();
2966
2967   // Gather all the statepoints which need to be rewritten. Be careful to only
2968 // consider those in reachable code since we need to ask dominance queries
2969 // when rewriting. We'll delete the unreachable ones in a moment.
2970 SmallVector<CallBase *, 64> ParsePointNeeded;
2971 SmallVector<CallInst *, 64> Intrinsics;
2972 for (Instruction &I : instructions(F)) {
2973 // TODO: only the ones with the flag set!
2974 if (NeedsRewrite(I)) {
2975 // NOTE removeUnreachableBlocks() is stronger than
2976 // DominatorTree::isReachableFromEntry(). In other words
2977 // removeUnreachableBlocks can remove some blocks for which
2978 // isReachableFromEntry() returns true.
2979 assert(DT.isReachableFromEntry(I.getParent()) &&
2980 "no unreachable blocks expected");
2981 ParsePointNeeded.push_back(cast<CallBase>(&I));
2982 }
2983 if (auto *CI = dyn_cast<CallInst>(&I))
2984 if (CI->getIntrinsicID() == Intrinsic::experimental_gc_get_pointer_base ||
2985 CI->getIntrinsicID() == Intrinsic::experimental_gc_get_pointer_offset)
2986 Intrinsics.emplace_back(CI);
2987 }
2988
2989 // Return early if no work to do.
2990 if (ParsePointNeeded.empty() && Intrinsics.empty())
2991 return MadeChange;
2992
2993 // As a prepass, go ahead and aggressively destroy single entry phi nodes.
2994 // These are created by LCSSA. They have the effect of increasing the size
2995 // of liveness sets for no good reason. It may be harder to do this post
2996 // insertion since relocations and base phis can confuse things.
2997 for (BasicBlock &BB : F)
2998 if (BB.getUniquePredecessor())
2999 MadeChange |= FoldSingleEntryPHINodes(&BB);
3000
3001 // Before we start introducing relocations, we want to tweak the IR a bit to
3002 // avoid unfortunate code generation effects. The main example is that we
3003 // want to try to make sure the comparison feeding a branch is after any
3004 // safepoints. Otherwise, we end up with a comparison of pre-relocation
3005 // values feeding a branch after relocation. This is semantically correct,
3006 // but results in extra register pressure since both the pre-relocation and
3007 // post-relocation copies must be available in registers. For code without
3008 // relocations this is handled elsewhere, but teaching the scheduler to
3009 // reverse the transform we're about to do would be slightly complex.
3010 // Note: This may extend the live range of the inputs to the icmp and thus
3011 // increase the liveset of any statepoint we move over. This is profitable
3012 // as long as all statepoints are in rare blocks. If we had in-register
3013 // lowering for live values this would be a much safer transform.
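  // A rough sketch of the rewrite (illustrative names only): given
  //   %c = icmp eq i8 addrspace(1)* %a, %b
  //   ... call requiring a safepoint ...
  //   br i1 %c, label %t, label %f
  // we move the icmp down to just before the branch, so that once relocations
  // are inserted it compares the relocated values and the pre-relocation
  // copies of %a/%b need not stay live across the safepoint.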
3014 auto getConditionInst = [](Instruction *TI) -> Instruction * {
3015 if (auto *BI = dyn_cast<BranchInst>(TI))
3016 if (BI->isConditional())
3017 return dyn_cast<Instruction>(BI->getCondition());
3018 // TODO: Extend this to handle switches
3019 return nullptr;
3020 };
3021 for (BasicBlock &BB : F) {
3022 Instruction *TI = BB.getTerminator();
3023 if (auto *Cond = getConditionInst(TI))
3024 // TODO: Handle more than just ICmps here. We should be able to move
3025 // most instructions without side effects or memory access.
3026 if (isa<ICmpInst>(Cond) && Cond->hasOneUse()) {
3027 MadeChange = true;
3028 Cond->moveBefore(TI);
3029 }
3030 }
3031
3032 // Nasty workaround - The base computation code in the main algorithm doesn't
3033 // consider the fact that a GEP can be used to convert a scalar to a vector.
3034   // The right fix for this is to integrate GEPs into the base rewriting
3035   // algorithm properly; this is just a short-term workaround to prevent
3036   // crashes by canonicalizing such GEPs into fully vector GEPs.
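  // Illustrative example of the canonicalization performed below: a GEP with a
  // scalar pointer operand but vector indices, e.g.
  //   %gep = getelementptr i8, i8 addrspace(1)* %base, <4 x i64> %offsets
  // has %base splatted to a <4 x i8 addrspace(1)*> first, so every operand of
  // the (vector) GEP is uniformly a vector.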
3037 for (Instruction &I : instructions(F)) {
3038 if (!isa<GetElementPtrInst>(I))
3039 continue;
3040
3041 unsigned VF = 0;
3042 for (unsigned i = 0; i < I.getNumOperands(); i++)
3043 if (auto *OpndVTy = dyn_cast<VectorType>(I.getOperand(i)->getType())) {
3044 assert(VF == 0 ||
3045 VF == cast<FixedVectorType>(OpndVTy)->getNumElements());
3046 VF = cast<FixedVectorType>(OpndVTy)->getNumElements();
3047 }
3048
3049 // It's the vector to scalar traversal through the pointer operand which
3050 // confuses base pointer rewriting, so limit ourselves to that case.
3051 if (!I.getOperand(0)->getType()->isVectorTy() && VF != 0) {
3052 IRBuilder<> B(&I);
3053 auto *Splat = B.CreateVectorSplat(VF, I.getOperand(0));
3054 I.setOperand(0, Splat);
3055 MadeChange = true;
3056 }
3057 }
3058
3059 // Cache the 'defining value' relation used in the computation and
3060 // insertion of base phis and selects. This ensures that we don't insert
3061 // large numbers of duplicate base_phis. Use one cache for both
3062 // inlineGetBaseAndOffset() and insertParsePoints().
3063 DefiningValueMapTy DVCache;
3064
3065   // Mapping between base values and a flag indicating whether each is a known
3066   // base or not.
3067 IsKnownBaseMapTy KnownBases;
3068
3069 if (!Intrinsics.empty())
3070 // Inline @gc.get.pointer.base() and @gc.get.pointer.offset() before finding
3071 // live references.
3072 MadeChange |= inlineGetBaseAndOffset(F, Intrinsics, DVCache, KnownBases);
3073
3074 if (!ParsePointNeeded.empty())
3075 MadeChange |=
3076 insertParsePoints(F, DT, TTI, ParsePointNeeded, DVCache, KnownBases);
3077
3078 return MadeChange;
3079 }
3080
3081 // liveness computation via standard dataflow
3082 // -------------------------------------------------------------------
3083
3084 // TODO: Consider using bitvectors for liveness, the set of potentially
3085 // interesting values should be small and easy to pre-compute.
3086
3087 /// Compute the live-in set for the range [Begin, End) starting from
3088 /// the live-out set of the basic block
3089 static void computeLiveInValues(BasicBlock::reverse_iterator Begin,
3090 BasicBlock::reverse_iterator End,
3091 SetVector<Value *> &LiveTmp) {
3092 for (auto &I : make_range(Begin, End)) {
3093 // KILL/Def - Remove this definition from LiveIn
3094 LiveTmp.remove(&I);
3095
3096     // Don't consider *uses* in PHI nodes; we handle their contribution to
3097     // predecessor blocks when we seed the LiveOut sets
3098 if (isa<PHINode>(I))
3099 continue;
3100
3101 // USE - Add to the LiveIn set for this instruction
3102 for (Value *V : I.operands()) {
3103 assert(!isUnhandledGCPointerType(V->getType()) &&
3104 "support for FCA unimplemented");
3105 if (isHandledGCPointerType(V->getType()) && !isa<Constant>(V)) {
3106 // The choice to exclude all things constant here is slightly subtle.
3107 // There are two independent reasons:
3108 // - We assume that things which are constant (from LLVM's definition)
3109 // do not move at runtime. For example, the address of a global
3110       //   variable is fixed, even though its contents may not be.
3111 // - Second, we can't disallow arbitrary inttoptr constants even
3112 // if the language frontend does. Optimization passes are free to
3113 // locally exploit facts without respect to global reachability. This
3114 // can create sections of code which are dynamically unreachable and
3115 // contain just about anything. (see constants.ll in tests)
3116 LiveTmp.insert(V);
3117 }
3118 }
3119 }
3120 }
3121
3122 static void computeLiveOutSeed(BasicBlock *BB, SetVector<Value *> &LiveTmp) {
3123 for (BasicBlock *Succ : successors(BB)) {
3124 for (auto &I : *Succ) {
3125 PHINode *PN = dyn_cast<PHINode>(&I);
3126 if (!PN)
3127 break;
3128
3129 Value *V = PN->getIncomingValueForBlock(BB);
3130 assert(!isUnhandledGCPointerType(V->getType()) &&
3131 "support for FCA unimplemented");
3132 if (isHandledGCPointerType(V->getType()) && !isa<Constant>(V))
3133 LiveTmp.insert(V);
3134 }
3135 }
3136 }
3137
3138 static SetVector<Value *> computeKillSet(BasicBlock *BB) {
3139 SetVector<Value *> KillSet;
3140 for (Instruction &I : *BB)
3141 if (isHandledGCPointerType(I.getType()))
3142 KillSet.insert(&I);
3143 return KillSet;
3144 }
3145
3146 #ifndef NDEBUG
3147 /// Check that the items in 'Live' dominate 'TI'. This is used as a basic
3148 /// validation check for the liveness computation.
3149 static void checkBasicSSA(DominatorTree &DT, SetVector<Value *> &Live,
3150 Instruction *TI, bool TermOkay = false) {
3151 for (Value *V : Live) {
3152 if (auto *I = dyn_cast<Instruction>(V)) {
3153 // The terminator can be a member of the LiveOut set. LLVM's definition
3154 // of instruction dominance states that V does not dominate itself. As
3155 // such, we need to special case this to allow it.
3156 if (TermOkay && TI == I)
3157 continue;
3158 assert(DT.dominates(I, TI) &&
3159 "basic SSA liveness expectation violated by liveness analysis");
3160 }
3161 }
3162 }
3163
3164 /// Check that all the liveness sets used during the computation of liveness
3165 /// obey basic SSA properties. This is useful for finding cases where we miss
3166 /// a def.
3167 static void checkBasicSSA(DominatorTree &DT, GCPtrLivenessData &Data,
3168 BasicBlock &BB) {
3169 checkBasicSSA(DT, Data.LiveSet[&BB], BB.getTerminator());
3170 checkBasicSSA(DT, Data.LiveOut[&BB], BB.getTerminator(), true);
3171 checkBasicSSA(DT, Data.LiveIn[&BB], BB.getTerminator());
3172 }
3173 #endif
3174
3175 static void computeLiveInValues(DominatorTree &DT, Function &F,
3176 GCPtrLivenessData &Data) {
3177 SmallSetVector<BasicBlock *, 32> Worklist;
3178
3179 // Seed the liveness for each individual block
3180 for (BasicBlock &BB : F) {
3181 Data.KillSet[&BB] = computeKillSet(&BB);
3182 Data.LiveSet[&BB].clear();
3183 computeLiveInValues(BB.rbegin(), BB.rend(), Data.LiveSet[&BB]);
3184
3185 #ifndef NDEBUG
3186 for (Value *Kill : Data.KillSet[&BB])
3187 assert(!Data.LiveSet[&BB].count(Kill) && "live set contains kill");
3188 #endif
3189
3190 Data.LiveOut[&BB] = SetVector<Value *>();
3191 computeLiveOutSeed(&BB, Data.LiveOut[&BB]);
3192 Data.LiveIn[&BB] = Data.LiveSet[&BB];
3193 Data.LiveIn[&BB].set_union(Data.LiveOut[&BB]);
3194 Data.LiveIn[&BB].set_subtract(Data.KillSet[&BB]);
3195 if (!Data.LiveIn[&BB].empty())
3196 Worklist.insert(pred_begin(&BB), pred_end(&BB));
3197 }
3198
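  // Stated here only as a summary of the code in this function (PhiSeed is an
  // expository name for the seed computed by computeLiveOutSeed above), the
  // fixed point below computes, for each block BB:
  //   LiveOut(BB) = PhiSeed(BB) union (union over successors S of LiveIn(S))
  //   LiveIn(BB)  = (LiveSet(BB) union LiveOut(BB)) - KillSet(BB)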
3199 // Propagate that liveness until stable
3200 while (!Worklist.empty()) {
3201 BasicBlock *BB = Worklist.pop_back_val();
3202
3203     // Compute our new liveout set, then exit early if it hasn't changed despite
3204     // the contribution of our successors.
3205 SetVector<Value *> LiveOut = Data.LiveOut[BB];
3206 const auto OldLiveOutSize = LiveOut.size();
3207 for (BasicBlock *Succ : successors(BB)) {
3208 assert(Data.LiveIn.count(Succ));
3209 LiveOut.set_union(Data.LiveIn[Succ]);
3210 }
3211     // assert: OldLiveOut is a subset of LiveOut
3212 if (OldLiveOutSize == LiveOut.size()) {
3213 // If the sets are the same size, then we didn't actually add anything
3214       // when unioning our successors' LiveIn. Thus, the LiveIn of this block
3215 // hasn't changed.
3216 continue;
3217 }
3218 Data.LiveOut[BB] = LiveOut;
3219
3220 // Apply the effects of this basic block
3221 SetVector<Value *> LiveTmp = LiveOut;
3222 LiveTmp.set_union(Data.LiveSet[BB]);
3223 LiveTmp.set_subtract(Data.KillSet[BB]);
3224
3225 assert(Data.LiveIn.count(BB));
3226 const SetVector<Value *> &OldLiveIn = Data.LiveIn[BB];
3227 // assert: OldLiveIn is a subset of LiveTmp
3228 if (OldLiveIn.size() != LiveTmp.size()) {
3229 Data.LiveIn[BB] = LiveTmp;
3230 Worklist.insert(pred_begin(BB), pred_end(BB));
3231 }
3232 } // while (!Worklist.empty())
3233
3234 #ifndef NDEBUG
3235 // Verify our output against SSA properties. This helps catch any
3236 // missing kills during the above iteration.
3237 for (BasicBlock &BB : F)
3238 checkBasicSSA(DT, Data, BB);
3239 #endif
3240 }
3241
3242 static void findLiveSetAtInst(Instruction *Inst, GCPtrLivenessData &Data,
3243 StatepointLiveSetTy &Out) {
3244 BasicBlock *BB = Inst->getParent();
3245
3246 // Note: The copy is intentional and required
3247 assert(Data.LiveOut.count(BB));
3248 SetVector<Value *> LiveOut = Data.LiveOut[BB];
3249
3250   // We want to handle the statepoint itself oddly. Its
3251   // call result is not live (normal), nor are its arguments
3252   // (unless they're used again later). This adjustment yields
3253   // specifically the set of values we need to relocate.
3254 computeLiveInValues(BB->rbegin(), ++Inst->getIterator().getReverse(),
3255 LiveOut);
3256 LiveOut.remove(Inst);
3257 Out.insert(LiveOut.begin(), LiveOut.end());
3258 }
3259
3260 static void recomputeLiveInValues(GCPtrLivenessData &RevisedLivenessData,
3261 CallBase *Call,
3262 PartiallyConstructedSafepointRecord &Info,
3263 PointerToBaseTy &PointerToBase) {
3264 StatepointLiveSetTy Updated;
3265 findLiveSetAtInst(Call, RevisedLivenessData, Updated);
3266
3267 // We may have base pointers which are now live that weren't before. We need
3268 // to update the PointerToBase structure to reflect this.
3269 for (auto V : Updated)
3270 PointerToBase.insert({ V, V });
3271
3272 Info.LiveSet = Updated;
3273 }
3274