//===- RewriteStatepointsForGC.cpp - Make GC relocations explicit ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Rewrite call/invoke instructions so as to make potential relocations
// performed by the garbage collector explicit in the IR.
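//
// As a rough, illustrative sketch (operand lists and intrinsic mangling
// elided), a call with a GC pointer live across it:
//
//   %obj = ...                                  ; i8 addrspace(1)*
//   call void @foo()
//   ... use %obj ...
//
// is rewritten so the collector's potential relocation becomes explicit:
//
//   %tok = call token @llvm.experimental.gc.statepoint(...)
//                     [ "gc-live"(i8 addrspace(1)* %obj) ]
//   %obj.relocated = call i8 addrspace(1)*
//                     @llvm.experimental.gc.relocate(token %tok, ...)
//   ... use %obj.relocated ...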
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/RewriteStatepointsForGC.h"

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <set>
#include <string>
#include <utility>
#include <vector>

#define DEBUG_TYPE "rewrite-statepoints-for-gc"

using namespace llvm;

// Print the liveset found at the insert location
static cl::opt<bool> PrintLiveSet("spp-print-liveset", cl::Hidden,
                                  cl::init(false));
static cl::opt<bool> PrintLiveSetSize("spp-print-liveset-size", cl::Hidden,
                                      cl::init(false));

// Print out the base pointers for debugging
static cl::opt<bool> PrintBasePointers("spp-print-base-pointers", cl::Hidden,
                                       cl::init(false));

// Cost threshold measuring when it is profitable to rematerialize a value
// instead of relocating it
static cl::opt<unsigned>
    RematerializationThreshold("spp-rematerialization-threshold", cl::Hidden,
                               cl::init(6));

#ifdef EXPENSIVE_CHECKS
static bool ClobberNonLive = true;
#else
static bool ClobberNonLive = false;
#endif

static cl::opt<bool, true> ClobberNonLiveOverride("rs4gc-clobber-non-live",
                                                  cl::location(ClobberNonLive),
                                                  cl::Hidden);

static cl::opt<bool>
    AllowStatepointWithNoDeoptInfo("rs4gc-allow-statepoint-with-no-deopt-info",
                                   cl::Hidden, cl::init(true));

/// The IR fed into RewriteStatepointsForGC may have had attributes and
/// metadata implying dereferenceability that are no longer valid/correct after
/// RewriteStatepointsForGC has run. This is because semantically, after
/// RewriteStatepointsForGC runs, all calls to gc.statepoint "free" the entire
/// heap. stripNonValidData (conservatively) restores
/// correctness by erasing all attributes in the module that externally imply
/// dereferenceability. Similar reasoning also applies to the noalias
/// attributes and metadata. gc.statepoint can touch the entire heap including
/// noalias objects.
/// Apart from attributes and metadata, we also remove instructions that imply
/// constant physical memory: llvm.invariant.start.
static void stripNonValidData(Module &M);

static bool shouldRewriteStatepointsIn(Function &F);

PreservedAnalyses RewriteStatepointsForGC::run(Module &M,
                                               ModuleAnalysisManager &AM) {
  bool Changed = false;
  auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  for (Function &F : M) {
    // Nothing to do for declarations.
    if (F.isDeclaration() || F.empty())
      continue;

    // Policy choice says not to rewrite - the most common reason is that we're
    // compiling code without a GCStrategy.
    if (!shouldRewriteStatepointsIn(F))
      continue;

    auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
    auto &TTI = FAM.getResult<TargetIRAnalysis>(F);
    auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
    Changed |= runOnFunction(F, DT, TTI, TLI);
  }
  if (!Changed)
    return PreservedAnalyses::all();

  // stripNonValidData asserts that shouldRewriteStatepointsIn
  // returns true for at least one function in the module. Since at least
  // one function changed, we know that the precondition is satisfied.
  stripNonValidData(M);

  PreservedAnalyses PA;
  PA.preserve<TargetIRAnalysis>();
  PA.preserve<TargetLibraryAnalysis>();
  return PA;
}

namespace {

class RewriteStatepointsForGCLegacyPass : public ModulePass {
  RewriteStatepointsForGC Impl;

public:
  static char ID; // Pass identification, replacement for typeid

  RewriteStatepointsForGCLegacyPass() : ModulePass(ID), Impl() {
    initializeRewriteStatepointsForGCLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M) override {
    bool Changed = false;
    for (Function &F : M) {
      // Nothing to do for declarations.
      if (F.isDeclaration() || F.empty())
        continue;

      // Policy choice says not to rewrite - the most common reason is that
      // we're compiling code without a GCStrategy.
      if (!shouldRewriteStatepointsIn(F))
        continue;

      TargetTransformInfo &TTI =
          getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
      const TargetLibraryInfo &TLI =
          getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
      auto &DT = getAnalysis<DominatorTreeWrapperPass>(F).getDomTree();

      Changed |= Impl.runOnFunction(F, DT, TTI, TLI);
    }

    if (!Changed)
      return false;

    // stripNonValidData asserts that shouldRewriteStatepointsIn
    // returns true for at least one function in the module. Since at least
    // one function changed, we know that the precondition is satisfied.
    stripNonValidData(M);
    return true;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // We add and rewrite a bunch of instructions, but don't really do much
    // else. We could in theory preserve a lot more analyses here.
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
  }
};

} // end anonymous namespace

char RewriteStatepointsForGCLegacyPass::ID = 0;

ModulePass *llvm::createRewriteStatepointsForGCLegacyPass() {
  return new RewriteStatepointsForGCLegacyPass();
}

INITIALIZE_PASS_BEGIN(RewriteStatepointsForGCLegacyPass,
                      "rewrite-statepoints-for-gc",
                      "Make relocations explicit at statepoints", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(RewriteStatepointsForGCLegacyPass,
                    "rewrite-statepoints-for-gc",
                    "Make relocations explicit at statepoints", false, false)

namespace {

struct GCPtrLivenessData {
  /// Values defined in this block.
  MapVector<BasicBlock *, SetVector<Value *>> KillSet;

  /// Values used in this block (and thus live); does not include values
  /// killed within this block.
  MapVector<BasicBlock *, SetVector<Value *>> LiveSet;

  /// Values live into this basic block (i.e. used by any
  /// instruction in this basic block or ones reachable from here)
  MapVector<BasicBlock *, SetVector<Value *>> LiveIn;

  /// Values live out of this basic block (i.e. live into
  /// any successor block)
  MapVector<BasicBlock *, SetVector<Value *>> LiveOut;
};
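
// The liveness computation later in this file solves the usual backward
// dataflow equations over these sets (shown here for illustration):
//   LiveOut[BB] = union of LiveIn[Succ] over all successors Succ of BB
//   LiveIn[BB]  = LiveSet[BB] union (LiveOut[BB] - KillSet[BB])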

// The type of the internal cache used inside the findBasePointers family
// of functions. From the caller's perspective, this is an opaque type and
// should not be inspected.
//
// In the actual implementation this caches two relations:
// - The base relation itself (i.e. this pointer is based on that one)
// - The base defining value relation (i.e. before base_phi insertion)
// Generally, after the execution of a full findBasePointer call, only the
// base relation will remain. Internally, we add a mixture of the two
// types, then update all entries of the second type to the first type.
using DefiningValueMapTy = MapVector<Value *, Value *>;
using StatepointLiveSetTy = SetVector<Value *>;
using RematerializedValueMapTy =
    MapVector<AssertingVH<Instruction>, AssertingVH<Value>>;

struct PartiallyConstructedSafepointRecord {
  /// The set of values known to be live across this safepoint
  StatepointLiveSetTy LiveSet;

  /// Mapping from live pointers to a base-defining-value
  MapVector<Value *, Value *> PointerToBase;

  /// The *new* gc.statepoint instruction itself. This produces the token
  /// that normal-path gc.relocates and the gc.result are tied to.
  GCStatepointInst *StatepointToken;

  /// Instruction to which exceptional gc relocates are attached.
  /// Makes it easier to iterate through them during relocationViaAlloca.
  Instruction *UnwindToken;

  /// Record live values that we rematerialized instead of relocating.
  /// They are not included in the 'LiveSet' field.
  /// Maps a rematerialized copy to its original value.
  RematerializedValueMapTy RematerializedValues;
};

} // end anonymous namespace

static ArrayRef<Use> GetDeoptBundleOperands(const CallBase *Call) {
  Optional<OperandBundleUse> DeoptBundle =
      Call->getOperandBundle(LLVMContext::OB_deopt);

  if (!DeoptBundle.hasValue()) {
    assert(AllowStatepointWithNoDeoptInfo &&
           "Found non-leaf call without deopt info!");
    return None;
  }

  return DeoptBundle.getValue().Inputs;
}
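
// For reference, a deopt operand bundle in the IR looks like (values are
// illustrative):
//   call void @foo() [ "deopt"(i32 10, i32 20) ]
// The Inputs returned above are the bundle's operands (i32 10, i32 20).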

/// Compute the live-in set for every basic block in the function
static void computeLiveInValues(DominatorTree &DT, Function &F,
                                GCPtrLivenessData &Data);

/// Given results from the dataflow liveness computation, find the set of live
/// Values at a particular instruction.
static void findLiveSetAtInst(Instruction *inst, GCPtrLivenessData &Data,
                              StatepointLiveSetTy &out);

// TODO: Once we can get to the GCStrategy, this becomes
// Optional<bool> isGCManagedPointer(const Type *Ty) const override {

static bool isGCPointerType(Type *T) {
  if (auto *PT = dyn_cast<PointerType>(T))
    // For the sake of this example GC, we arbitrarily pick addrspace(1) as our
    // GC managed heap. We know that a pointer into this heap needs to be
    // updated and that no other pointer does.
    return PT->getAddressSpace() == 1;
  return false;
}
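
// Under this convention (illustrative):
//   i8 addrspace(1)*  -> GC-managed, must be relocatable across safepoints
//   i8*               -> not GC-managed, never reported to the collector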

// Return true if this type is one which a) is a gc pointer or contains a GC
// pointer and b) is of a type this code expects to encounter as a live value.
// (The insertion code will assert that a type which matches (a) and not (b)
// is not encountered.)
static bool isHandledGCPointerType(Type *T) {
  // We fully support gc pointers
  if (isGCPointerType(T))
    return true;
  // We partially support vectors of gc pointers. The code will assert if it
  // can't handle something.
  if (auto VT = dyn_cast<VectorType>(T))
    if (isGCPointerType(VT->getElementType()))
      return true;
  return false;
}

#ifndef NDEBUG
/// Returns true if this type contains a gc pointer whether we know how to
/// handle that type or not.
static bool containsGCPtrType(Type *Ty) {
  if (isGCPointerType(Ty))
    return true;
  if (VectorType *VT = dyn_cast<VectorType>(Ty))
    return isGCPointerType(VT->getScalarType());
  if (ArrayType *AT = dyn_cast<ArrayType>(Ty))
    return containsGCPtrType(AT->getElementType());
  if (StructType *ST = dyn_cast<StructType>(Ty))
    return llvm::any_of(ST->elements(), containsGCPtrType);
  return false;
}

// Returns true if this is a type which a) is a gc pointer or contains a GC
// pointer and b) is of a type which the code doesn't expect (i.e. first class
// aggregates). Used to trip assertions.
static bool isUnhandledGCPointerType(Type *Ty) {
  return containsGCPtrType(Ty) && !isHandledGCPointerType(Ty);
}
#endif

// Return the name of the value suffixed with the provided suffix, or, if the
// value didn't have a name, the specified default.
static std::string suffixed_name_or(Value *V, StringRef Suffix,
                                    StringRef DefaultName) {
  return V->hasName() ? (V->getName() + Suffix).str() : DefaultName.str();
}
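
// For example (illustrative): suffixed_name_or(%tmp, ".base", "base_phi")
// yields "tmp.base", while for an unnamed value it yields "base_phi".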

// Conservatively identifies any definitions which might be live at the
// given instruction. The analysis is performed immediately before the
// given instruction. Values defined by that instruction are not considered
// live. Values used by that instruction are considered live.
static void analyzeParsePointLiveness(
    DominatorTree &DT, GCPtrLivenessData &OriginalLivenessData, CallBase *Call,
    PartiallyConstructedSafepointRecord &Result) {
  StatepointLiveSetTy LiveSet;
  findLiveSetAtInst(Call, OriginalLivenessData, LiveSet);

  if (PrintLiveSet) {
    dbgs() << "Live Variables:\n";
    for (Value *V : LiveSet)
      dbgs() << " " << V->getName() << " " << *V << "\n";
  }
  if (PrintLiveSetSize) {
    dbgs() << "Safepoint For: " << Call->getCalledOperand()->getName() << "\n";
    dbgs() << "Number live values: " << LiveSet.size() << "\n";
  }
  Result.LiveSet = LiveSet;
}

// Returns true if V is a known base result.
static bool isKnownBaseResult(Value *V);

// Returns true if V is a BaseResult that already exists in the IR, i.e. it is
// not created by the findBasePointers algorithm.
static bool isOriginalBaseResult(Value *V);

namespace {

/// A single base defining value - An immediate base defining value for an
/// instruction 'Def' is an input to 'Def' whose base is also a base of 'Def'.
/// For instructions which have multiple pointer [vector] inputs or that
/// transition between vector and scalar types, there is no immediate base
/// defining value. The 'base defining value' for 'Def' is the transitive
/// closure of this relation stopping at the first instruction which has no
/// immediate base defining value. The b.d.v. might itself be a base pointer,
/// but it can also be an arbitrary derived pointer.
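///
/// For example (illustrative): for
///   %derived = getelementptr i8, i8 addrspace(1)* %obj, i64 16
/// the search recurses through the gep's pointer operand, so if %obj is a
/// function argument, the b.d.v. of %derived is %obj. By contrast,
///   %merged = phi i8 addrspace(1)* [ %d1, %bb1 ], [ %d2, %bb2 ]
/// has no immediate base defining value and is its own b.d.v.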
struct BaseDefiningValueResult {
  /// Contains the value which is the base defining value.
  Value * const BDV;

  /// True if the base defining value is also known to be an actual base
  /// pointer.
  const bool IsKnownBase;

  BaseDefiningValueResult(Value *BDV, bool IsKnownBase)
      : BDV(BDV), IsKnownBase(IsKnownBase) {
#ifndef NDEBUG
    // Check consistency between new and old means of checking whether a BDV is
    // a base.
    bool MustBeBase = isKnownBaseResult(BDV);
    assert(!MustBeBase || MustBeBase == IsKnownBase);
#endif
  }
};

} // end anonymous namespace

static BaseDefiningValueResult findBaseDefiningValue(Value *I);

/// Return a base defining value for the given vector value 'I'. As an
/// optimization, this method will try to determine when the value is known to
/// already be a base pointer. If this can be established, the IsKnownBase
/// field of the returned result will be true. Note that either a vector or a
/// pointer typed value can be returned. For the former, the vector returned
/// is a BDV (and possibly a base) of the entire vector 'I'. If the latter,
/// the returned pointer is a BDV (or possibly a base) for the particular
/// element in 'I'.
static BaseDefiningValueResult
findBaseDefiningValueOfVector(Value *I) {
  // Each case parallels findBaseDefiningValue below, see that code for
  // detailed motivation.

  if (isa<Argument>(I))
    // An incoming argument to the function is a base pointer
    return BaseDefiningValueResult(I, true);

  if (isa<Constant>(I))
    // Base of constant vector consists only of constant null pointers.
    // For reasoning see similar case inside 'findBaseDefiningValue' function.
    return BaseDefiningValueResult(ConstantAggregateZero::get(I->getType()),
                                   true);

  if (isa<LoadInst>(I))
    return BaseDefiningValueResult(I, true);

  if (isa<InsertElementInst>(I))
    // We don't know whether this vector contains entirely base pointers or
    // not. To be conservatively correct, we treat it as a BDV and will
    // duplicate code as needed to construct a parallel vector of bases.
    return BaseDefiningValueResult(I, false);

  if (isa<ShuffleVectorInst>(I))
    // We don't know whether this vector contains entirely base pointers or
    // not. To be conservatively correct, we treat it as a BDV and will
    // duplicate code as needed to construct a parallel vector of bases.
    // TODO: There are a number of local optimizations which could be applied
    // here for particular shufflevector patterns.
    return BaseDefiningValueResult(I, false);

  // The behavior of getelementptr instructions is the same for vector and
  // non-vector data types.
  if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
    return findBaseDefiningValue(GEP->getPointerOperand());

  // If the pointer comes through a bitcast of a vector of pointers to
  // a vector of another type of pointer, then look through the bitcast
  if (auto *BC = dyn_cast<BitCastInst>(I))
    return findBaseDefiningValue(BC->getOperand(0));

  // We assume that functions in the source language only return base
  // pointers. This should probably be generalized via attributes to support
  // both source language and internal functions.
  if (isa<CallInst>(I) || isa<InvokeInst>(I))
    return BaseDefiningValueResult(I, true);

  // A PHI or Select is a base defining value. The outer findBasePointer
  // algorithm is responsible for constructing a base value for this BDV.
  assert((isa<SelectInst>(I) || isa<PHINode>(I)) &&
         "unknown vector instruction - no base found for vector element");
  return BaseDefiningValueResult(I, false);
}

/// Helper function for findBasePointer - Will return a value which either a)
/// defines the base pointer for the input, b) blocks the simple search
/// (i.e. a PHI or Select of two derived pointers), or c) involves a change
/// from pointer to vector type or back.
static BaseDefiningValueResult findBaseDefiningValue(Value *I) {
  assert(I->getType()->isPtrOrPtrVectorTy() &&
         "Illegal to ask for the base pointer of a non-pointer type");

  if (I->getType()->isVectorTy())
    return findBaseDefiningValueOfVector(I);

  if (isa<Argument>(I))
    // An incoming argument to the function is a base pointer
    // We should have never reached here if this argument isn't a gc value
    return BaseDefiningValueResult(I, true);

  if (isa<Constant>(I)) {
    // We assume that objects with a constant base (e.g. a global) can't move
    // and don't need to be reported to the collector because they are always
    // live. Besides global references, all kinds of constants (e.g. undef,
    // constant expressions, null pointers) can be introduced by the inliner or
    // the optimizer, especially on dynamically dead paths.
    // Here we treat all of them as having a single null base. By doing this we
    // try to avoid problems reporting various conflicts in the form of
    // "phi (const1, const2)" or "phi (const, regular gc ptr)".
    // See constant.ll file for relevant test cases.

    return BaseDefiningValueResult(
        ConstantPointerNull::get(cast<PointerType>(I->getType())), true);
  }

  // inttoptrs in an integral address space are currently ill-defined. We
  // treat them as defining base pointers here for consistency with the
  // constant rule above and because we don't really have a better semantic
  // to give them. Note that the optimizer is always free to insert undefined
  // behavior on dynamically dead paths as well.
  if (isa<IntToPtrInst>(I))
    return BaseDefiningValueResult(I, true);

  if (CastInst *CI = dyn_cast<CastInst>(I)) {
    Value *Def = CI->stripPointerCasts();
    // If stripping pointer casts changes the address space there is an
    // addrspacecast in between.
    assert(cast<PointerType>(Def->getType())->getAddressSpace() ==
               cast<PointerType>(CI->getType())->getAddressSpace() &&
           "unsupported addrspacecast");
    // If we find a cast instruction here, it means we've found a cast which is
    // not simply a pointer cast (i.e. an inttoptr). We don't know how to
    // handle int->ptr conversion.
    assert(!isa<CastInst>(Def) && "shouldn't find another cast here");
    return findBaseDefiningValue(Def);
  }

  if (isa<LoadInst>(I))
    // The value loaded is a gc base itself
    return BaseDefiningValueResult(I, true);

  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I))
    // The base of this GEP is the base
    return findBaseDefiningValue(GEP->getPointerOperand());

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      // fall through to general call handling
      break;
    case Intrinsic::experimental_gc_statepoint:
      llvm_unreachable("statepoints don't produce pointers");
    case Intrinsic::experimental_gc_relocate:
      // Rerunning safepoint insertion after safepoints are already
      // inserted is not supported. It could probably be made to work,
      // but why are you doing this? There's no good reason.
      llvm_unreachable("repeat safepoint insertion is not supported");
    case Intrinsic::gcroot:
      // Currently, this mechanism hasn't been extended to work with gcroot.
      // There's no reason it couldn't be, but I haven't thought about the
      // implications much.
      llvm_unreachable(
          "interaction with the gcroot mechanism is not supported");
    case Intrinsic::experimental_gc_get_pointer_base:
      return findBaseDefiningValue(II->getOperand(0));
    }
  }
  // We assume that functions in the source language only return base
  // pointers. This should probably be generalized via attributes to support
  // both source language and internal functions.
  if (isa<CallInst>(I) || isa<InvokeInst>(I))
    return BaseDefiningValueResult(I, true);

  // TODO: I have absolutely no idea how to implement this part yet. It's not
  // necessarily hard, I just haven't really looked at it yet.
  assert(!isa<LandingPadInst>(I) && "Landing Pad is unimplemented");

  if (isa<AtomicCmpXchgInst>(I))
    // A CAS is effectively an atomic store and load combined under a
    // predicate. From the perspective of base pointers, we just treat it
    // like a load.
    return BaseDefiningValueResult(I, true);

  assert(!isa<AtomicRMWInst>(I) && "Xchg handled above, all others are "
                                   "binary ops which don't apply to pointers");

  // The aggregate ops. Aggregates can either be in the heap or on the
  // stack, but in either case, this is simply a field load. As a result,
  // this is a defining definition of the base just like a load is.
  if (isa<ExtractValueInst>(I))
    return BaseDefiningValueResult(I, true);

  // We should never see an insert vector since that would require we be
  // tracing back a struct value not a pointer value.
  assert(!isa<InsertValueInst>(I) &&
         "Base pointer for a struct is meaningless");

  // This value might have been generated by findBasePointer() called when
  // substituting gc.get.pointer.base() intrinsic.
  bool IsKnownBase =
      isa<Instruction>(I) && cast<Instruction>(I)->getMetadata("is_base_value");

  // An extractelement produces a base result exactly when its input does.
  // We may need to insert a parallel instruction to extract the appropriate
  // element out of the base vector corresponding to the input. Given this,
  // it's analogous to the phi and select case even though it's not a merge.
  if (isa<ExtractElementInst>(I))
    // Note: There are a lot of obvious peephole cases here. These are
    // deliberately handled after the main base pointer inference algorithm to
    // make writing test cases to exercise that code easier.
    return BaseDefiningValueResult(I, IsKnownBase);

  // The last two cases here don't return a base pointer. Instead, they
  // return a value which dynamically selects from among several base
  // derived pointers (each potentially with its own base). It's the job of
  // the caller to resolve these.
  assert((isa<SelectInst>(I) || isa<PHINode>(I)) &&
         "missing instruction case in findBaseDefiningValue");
  return BaseDefiningValueResult(I, IsKnownBase);
}

/// Returns the base defining value for this value.
static Value *findBaseDefiningValueCached(Value *I, DefiningValueMapTy &Cache) {
  Value *&Cached = Cache[I];
  if (!Cached) {
    Cached = findBaseDefiningValue(I).BDV;
    LLVM_DEBUG(dbgs() << "fBDV-cached: " << I->getName() << " -> "
                      << Cached->getName() << "\n");
  }
  assert(Cache[I] != nullptr);
  return Cached;
}

/// Return a base pointer for this value if known. Otherwise, return its
/// base defining value.
static Value *findBaseOrBDV(Value *I, DefiningValueMapTy &Cache) {
  Value *Def = findBaseDefiningValueCached(I, Cache);
  auto Found = Cache.find(Def);
  if (Found != Cache.end()) {
    // Either a base-of relation, or a self reference. Caller must check.
    return Found->second;
  }
  // Only a BDV available
  return Def;
}

/// This value is a base pointer that is not generated by RS4GC, i.e. it already
/// exists in the code.
static bool isOriginalBaseResult(Value *V) {
  // no recursion possible
  return !isa<PHINode>(V) && !isa<SelectInst>(V) &&
         !isa<ExtractElementInst>(V) && !isa<InsertElementInst>(V) &&
         !isa<ShuffleVectorInst>(V);
}

/// Given the result of a call to findBaseDefiningValue, or findBaseOrBDV,
/// is it known to be a base pointer? Or do we need to continue searching.
static bool isKnownBaseResult(Value *V) {
  if (isOriginalBaseResult(V))
    return true;
  if (isa<Instruction>(V) &&
      cast<Instruction>(V)->getMetadata("is_base_value")) {
    // This is a previously inserted base phi or select. We know
    // that this is a base value.
    return true;
  }

  // We need to keep searching
  return false;
}

// Returns true if First and Second values are both scalar or both vector.
static bool areBothVectorOrScalar(Value *First, Value *Second) {
  return isa<VectorType>(First->getType()) ==
         isa<VectorType>(Second->getType());
}

namespace {

/// Models the state of a single base defining value in the findBasePointer
/// algorithm for determining where a new instruction is needed to propagate
/// the base of this BDV.
class BDVState {
public:
  enum StatusTy {
    // Starting state of lattice
    Unknown,
    // Some specific base value -- does *not* mean that instruction
    // propagates the base of the object
    // ex: gep %arg, 16 -> %arg is the base value
    Base,
    // Need to insert a node to represent a merge.
    Conflict
  };
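
  // Illustrative meet() behavior over this lattice (with distinct bases
  // b1 != b2):
  //   meet(Unknown,  Base(b1)) == Base(b1)
  //   meet(Base(b1), Base(b1)) == Base(b1)
  //   meet(Base(b1), Base(b2)) == Conflict
  //   meet(Conflict, anything) == Conflict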

  BDVState() {
    llvm_unreachable("missing state in map");
  }

  explicit BDVState(Value *OriginalValue)
      : OriginalValue(OriginalValue) {}
  explicit BDVState(Value *OriginalValue, StatusTy Status,
                    Value *BaseValue = nullptr)
      : OriginalValue(OriginalValue), Status(Status), BaseValue(BaseValue) {
    assert(Status != Base || BaseValue);
  }

  StatusTy getStatus() const { return Status; }
  Value *getOriginalValue() const { return OriginalValue; }
  Value *getBaseValue() const { return BaseValue; }

  bool isBase() const { return getStatus() == Base; }
  bool isUnknown() const { return getStatus() == Unknown; }
  bool isConflict() const { return getStatus() == Conflict; }

  // Values of type BDVState form a lattice, and this function implements the
  // meet operation.
  void meet(const BDVState &Other) {
    auto markConflict = [&]() {
      Status = BDVState::Conflict;
      BaseValue = nullptr;
    };
    // Conflict is a final state.
    if (isConflict())
      return;
    // If we are not known - just take the other state.
    if (isUnknown()) {
      Status = Other.getStatus();
      BaseValue = Other.getBaseValue();
      return;
    }
    // We are base.
    assert(isBase() && "Unknown state");
    // If other is unknown - just keep our state.
    if (Other.isUnknown())
      return;
    // If other is conflict - it is a final state.
    if (Other.isConflict())
      return markConflict();
    // Other is base as well.
    assert(Other.isBase() && "Unknown state");
    // If bases are different - Conflict.
    if (getBaseValue() != Other.getBaseValue())
      return markConflict();
    // We are identical, do nothing.
  }

  bool operator==(const BDVState &Other) const {
    return OriginalValue == Other.OriginalValue &&
           BaseValue == Other.BaseValue && Status == Other.Status;
  }

  bool operator!=(const BDVState &other) const { return !(*this == other); }

  LLVM_DUMP_METHOD
  void dump() const {
    print(dbgs());
    dbgs() << '\n';
  }

  void print(raw_ostream &OS) const {
    switch (getStatus()) {
    case Unknown:
      OS << "U";
      break;
    case Base:
      OS << "B";
      break;
    case Conflict:
      OS << "C";
      break;
    }
    OS << " (base " << getBaseValue() << " - "
       << (getBaseValue() ? getBaseValue()->getName() : "nullptr") << ")"
       << " for " << OriginalValue->getName() << ":";
  }

private:
  AssertingVH<Value> OriginalValue; // instruction this state corresponds to
  StatusTy Status = Unknown;
  AssertingVH<Value> BaseValue = nullptr; // Non-null only if Status == Base.
};

} // end anonymous namespace

#ifndef NDEBUG
static raw_ostream &operator<<(raw_ostream &OS, const BDVState &State) {
  State.print(OS);
  return OS;
}
#endif

/// For a given value or instruction, figure out what base ptr it's derived
/// from. For gc objects, this is simply itself. On success, returns a value
/// which is the base pointer. (This is reliable and can be used for
/// relocation.) On failure, returns nullptr.
static Value *findBasePointer(Value *I, DefiningValueMapTy &Cache) {
  Value *Def = findBaseOrBDV(I, Cache);

  if (isKnownBaseResult(Def) && areBothVectorOrScalar(Def, I))
    return Def;

  // Here's the rough algorithm:
  // - For every SSA value, construct a mapping to either an actual base
  //   pointer or a PHI which obscures the base pointer.
  // - Construct a mapping from PHI to unknown TOP state. Use an
  //   optimistic algorithm to propagate base pointer information. Lattice
  //   looks like:
  //   UNKNOWN
  //   b1 b2 b3 b4
  //   CONFLICT
  //   When the algorithm terminates, all PHIs will either have a single
  //   concrete base or be in a conflict state.
  // - For every conflict, insert a dummy PHI node without arguments. Add
  //   these to the base[Instruction] = BasePtr mapping. For every
  //   non-conflict, add the actual base.
  // - For every conflict, add arguments for the base[a] of each input
  //   argument.
  //
  // Note: A simpler form of this would be to add the conflict form of all
  // PHIs without running the optimistic algorithm. This would be
  // analogous to pessimistic data flow and would likely lead to an
  // overall worse solution.
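  //
  // As an illustrative sketch: given a conflict phi over two derived
  // pointers,
  //   %d = phi i8 addrspace(1)* [ %d1, %bb1 ], [ %d2, %bb2 ]
  // the algorithm clones a parallel phi over their bases (marked with
  // "is_base_value" metadata) and records it as the base of %d:
  //   %d.base = phi i8 addrspace(1)* [ %b1, %bb1 ], [ %b2, %bb2 ]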

#ifndef NDEBUG
  auto isExpectedBDVType = [](Value *BDV) {
    return isa<PHINode>(BDV) || isa<SelectInst>(BDV) ||
           isa<ExtractElementInst>(BDV) || isa<InsertElementInst>(BDV) ||
           isa<ShuffleVectorInst>(BDV);
  };
#endif

  // Once populated, will contain a mapping from each potentially non-base BDV
  // to a lattice value (described above) which corresponds to that BDV.
  // We use the order of insertion (DFS over the def/use graph) to provide a
  // stable deterministic ordering for visiting DenseMaps (which are unordered)
  // below. This is important for deterministic compilation.
  MapVector<Value *, BDVState> States;

#ifndef NDEBUG
  auto VerifyStates = [&]() {
    for (auto &Entry : States) {
      assert(Entry.first == Entry.second.getOriginalValue());
    }
  };
#endif

  auto visitBDVOperands = [](Value *BDV, std::function<void (Value*)> F) {
    if (PHINode *PN = dyn_cast<PHINode>(BDV)) {
      for (Value *InVal : PN->incoming_values())
        F(InVal);
    } else if (SelectInst *SI = dyn_cast<SelectInst>(BDV)) {
      F(SI->getTrueValue());
      F(SI->getFalseValue());
    } else if (auto *EE = dyn_cast<ExtractElementInst>(BDV)) {
      F(EE->getVectorOperand());
    } else if (auto *IE = dyn_cast<InsertElementInst>(BDV)) {
      F(IE->getOperand(0));
      F(IE->getOperand(1));
    } else if (auto *SV = dyn_cast<ShuffleVectorInst>(BDV)) {
      // For a canonical broadcast, ignore the undef argument
      // (without this, we insert a parallel base shuffle for every broadcast)
      F(SV->getOperand(0));
      if (!SV->isZeroEltSplat())
        F(SV->getOperand(1));
    } else {
      llvm_unreachable("unexpected BDV type");
    }
  };

  // Recursively fill in all base defining values reachable from the initial
  // one for which we don't already know a definite base value for
  /* scope */ {
    SmallVector<Value*, 16> Worklist;
    Worklist.push_back(Def);
    States.insert({Def, BDVState(Def)});
    while (!Worklist.empty()) {
      Value *Current = Worklist.pop_back_val();
      assert(!isOriginalBaseResult(Current) && "why did it get added?");

      auto visitIncomingValue = [&](Value *InVal) {
        Value *Base = findBaseOrBDV(InVal, Cache);
        if (isKnownBaseResult(Base) && areBothVectorOrScalar(Base, InVal))
          // Known bases won't need new instructions introduced and can be
          // ignored safely. However, this can only be done when InVal and Base
          // are both scalar or both vector. Otherwise, we need to find a
          // correct BDV for InVal, by creating an entry in the lattice
          // (States).
          return;
        assert(isExpectedBDVType(Base) && "the only non-base values "
               "we see should be base defining values");
        if (States.insert(std::make_pair(Base, BDVState(Base))).second)
          Worklist.push_back(Base);
      };

      visitBDVOperands(Current, visitIncomingValue);
    }
  }

#ifndef NDEBUG
  VerifyStates();
  LLVM_DEBUG(dbgs() << "States after initialization:\n");
  for (auto Pair : States) {
    LLVM_DEBUG(dbgs() << " " << Pair.second << " for " << *Pair.first << "\n");
  }
#endif

  // Iterate forward through the value graph pruning any node from the state
  // list where all of the inputs are base pointers. The purpose of this is to
  // reuse existing values when the derived pointer we were asked to
  // materialize a base pointer for happens to be a base pointer itself. (Or a
  // sub-graph feeding it does.)
  SmallVector<Value *> ToRemove;
  do {
    ToRemove.clear();
    for (auto Pair : States) {
      Value *BDV = Pair.first;
      auto canPruneInput = [&](Value *V) {
        Value *BDV = findBaseOrBDV(V, Cache);
        if (V->stripPointerCasts() != BDV)
          return false;
        // The assumption is that anything not in the state list
        // propagates a base pointer.
        return States.count(BDV) == 0;
      };

      bool CanPrune = true;
      visitBDVOperands(BDV, [&](Value *Op) {
        CanPrune = CanPrune && canPruneInput(Op);
      });
      if (CanPrune)
        ToRemove.push_back(BDV);
    }
    for (Value *V : ToRemove) {
      States.erase(V);
      // Cache the fact V is its own base for later usage.
      Cache[V] = V;
    }
  } while (!ToRemove.empty());

  // Did we manage to prove that Def itself must be a base pointer?
  if (!States.count(Def))
    return Def;

  // Return a phi state for a base defining value. We'll generate a new
  // base state for known bases and expect to find a cached state otherwise.
  auto GetStateForBDV = [&](Value *BaseValue, Value *Input) {
    auto I = States.find(BaseValue);
    if (I != States.end())
      return I->second;
    assert(areBothVectorOrScalar(BaseValue, Input));
    return BDVState(BaseValue, BDVState::Base, BaseValue);
  };

  bool Progress = true;
  while (Progress) {
#ifndef NDEBUG
    const size_t OldSize = States.size();
#endif
    Progress = false;
    // We're only changing values in this loop, thus safe to keep iterators.
    // Since this is computing a fixed point, the order of visit does not
    // affect the result. TODO: We could use a worklist here and make this run
    // much faster.
    for (auto Pair : States) {
      Value *BDV = Pair.first;
      // Only values that do not have known bases or those that have differing
      // type (scalar versus vector) from a possible known base should be in
      // the lattice.
      assert((!isKnownBaseResult(BDV) ||
              !areBothVectorOrScalar(BDV, Pair.second.getBaseValue())) &&
             "why did it get added?");

      BDVState NewState(BDV);
      visitBDVOperands(BDV, [&](Value *Op) {
        Value *BDV = findBaseOrBDV(Op, Cache);
        auto OpState = GetStateForBDV(BDV, Op);
        NewState.meet(OpState);
      });

      BDVState OldState = States[BDV];
      if (OldState != NewState) {
        Progress = true;
        States[BDV] = NewState;
      }
    }

    assert(OldSize == States.size() &&
           "fixed point shouldn't be adding any new nodes to state");
  }

#ifndef NDEBUG
  VerifyStates();
  LLVM_DEBUG(dbgs() << "States after meet iteration:\n");
  for (auto Pair : States) {
    LLVM_DEBUG(dbgs() << " " << Pair.second << " for " << *Pair.first << "\n");
  }
#endif

  // Handle all instructions that have a vector BDV, but the instruction itself
  // is of scalar type.
  for (auto Pair : States) {
    Instruction *I = cast<Instruction>(Pair.first);
    BDVState State = Pair.second;
    auto *BaseValue = State.getBaseValue();
    // Only values that do not have known bases or those that have differing
    // type (scalar versus vector) from a possible known base should be in the
    // lattice.
    assert((!isKnownBaseResult(I) || !areBothVectorOrScalar(I, BaseValue)) &&
           "why did it get added?");
    assert(!State.isUnknown() && "Optimistic algorithm didn't complete!");

    if (!State.isBase() || !isa<VectorType>(BaseValue->getType()))
      continue;
    // extractelement instructions are a bit special in that we may need to
    // insert an extract even when we know an exact base for the instruction.
    // The problem is that we need to convert from a vector base to a scalar
    // base for the particular index we're interested in.
    if (isa<ExtractElementInst>(I)) {
      auto *EE = cast<ExtractElementInst>(I);
      // TODO: In many cases, the new instruction is just EE itself. We should
      // exploit this, but can't do it here since it would break the invariant
      // about the BDV not being known to be a base.
      auto *BaseInst = ExtractElementInst::Create(
          State.getBaseValue(), EE->getIndexOperand(), "base_ee", EE);
      BaseInst->setMetadata("is_base_value", MDNode::get(I->getContext(), {}));
      States[I] = BDVState(I, BDVState::Base, BaseInst);
    } else if (!isa<VectorType>(I->getType())) {
      // We need to handle cases that have a vector base but the instruction is
      // a scalar type (these could be phis or selects or any instruction that
      // are of scalar type, but the base can be a vector type). We
      // conservatively set this as conflict. Setting the base value for these
      // conflicts is handled in the next loop which traverses States.
      States[I] = BDVState(I, BDVState::Conflict);
    }
  }

#ifndef NDEBUG
  VerifyStates();
#endif

  // Insert Phis for all conflicts
  // TODO: adjust naming patterns to avoid this order of iteration dependency
  for (auto Pair : States) {
    Instruction *I = cast<Instruction>(Pair.first);
    BDVState State = Pair.second;
    // Only values that do not have known bases or those that have differing
    // type (scalar versus vector) from a possible known base should be in the
    // lattice.
    assert((!isKnownBaseResult(I) ||
            !areBothVectorOrScalar(I, State.getBaseValue())) &&
           "why did it get added?");
    assert(!State.isUnknown() && "Optimistic algorithm didn't complete!");

    // Since we're joining a vector and scalar base, they can never be the
    // same. As a result, we should always see insert element having reached
    // the conflict state.
    assert(!isa<InsertElementInst>(I) || State.isConflict());

    if (!State.isConflict())
      continue;

    auto getMangledName = [](Instruction *I) -> std::string {
      if (isa<PHINode>(I)) {
        return suffixed_name_or(I, ".base", "base_phi");
      } else if (isa<SelectInst>(I)) {
        return suffixed_name_or(I, ".base", "base_select");
      } else if (isa<ExtractElementInst>(I)) {
        return suffixed_name_or(I, ".base", "base_ee");
      } else if (isa<InsertElementInst>(I)) {
        return suffixed_name_or(I, ".base", "base_ie");
      } else {
        return suffixed_name_or(I, ".base", "base_sv");
      }
    };

    Instruction *BaseInst = I->clone();
    BaseInst->insertBefore(I);
    BaseInst->setName(getMangledName(I));
    // Add metadata marking this as a base value
    BaseInst->setMetadata("is_base_value", MDNode::get(I->getContext(), {}));
    States[I] = BDVState(I, BDVState::Conflict, BaseInst);
  }

#ifndef NDEBUG
  VerifyStates();
#endif

  // Returns an instruction which produces the base pointer for a given
  // instruction. The instruction is assumed to be an input to one of the BDVs
  // seen in the inference algorithm above. As such, we must either already
  // know its base defining value is a base, or have inserted a new
  // instruction to propagate the base of its BDV and have entered that newly
  // introduced instruction into the state table. In either case, we are
  // assured to be able to determine an instruction which produces its base
  // pointer.
  auto getBaseForInput = [&](Value *Input, Instruction *InsertPt) {
    Value *BDV = findBaseOrBDV(Input, Cache);
    Value *Base = nullptr;
    if (!States.count(BDV)) {
      assert(areBothVectorOrScalar(BDV, Input));
      Base = BDV;
    } else {
      // Either conflict or base.
      assert(States.count(BDV));
      Base = States[BDV].getBaseValue();
    }
    assert(Base && "Can't be null");
    // The cast is needed since base traversal may strip away bitcasts
    if (Base->getType() != Input->getType() && InsertPt)
      Base = new BitCastInst(Base, Input->getType(), "cast", InsertPt);
    return Base;
  };

  // Fixup all the inputs of the new PHIs. Visit order needs to be
  // deterministic and predictable because we're naming newly created
  // instructions.
  for (auto Pair : States) {
    Instruction *BDV = cast<Instruction>(Pair.first);
    BDVState State = Pair.second;

    // Only values that do not have known bases or those that have differing
    // type (scalar versus vector) from a possible known base should be in the
    // lattice.
    assert((!isKnownBaseResult(BDV) ||
            !areBothVectorOrScalar(BDV, State.getBaseValue())) &&
           "why did it get added?");
    assert(!State.isUnknown() && "Optimistic algorithm didn't complete!");
    if (!State.isConflict())
      continue;

    if (PHINode *BasePHI = dyn_cast<PHINode>(State.getBaseValue())) {
      PHINode *PN = cast<PHINode>(BDV);
      const unsigned NumPHIValues = PN->getNumIncomingValues();

      // The IR verifier requires phi nodes with multiple entries from the
      // same basic block to have the same incoming value for each of those
      // entries. Since we're inserting bitcasts in the loop, make sure we
      // do so at least once per incoming block.
      DenseMap<BasicBlock *, Value*> BlockToValue;
      for (unsigned i = 0; i < NumPHIValues; i++) {
        Value *InVal = PN->getIncomingValue(i);
        BasicBlock *InBB = PN->getIncomingBlock(i);
        if (!BlockToValue.count(InBB))
          BlockToValue[InBB] = getBaseForInput(InVal, InBB->getTerminator());
        else {
#ifndef NDEBUG
          Value *OldBase = BlockToValue[InBB];
          Value *Base = getBaseForInput(InVal, nullptr);
          // In essence this assert states: the only way two values
          // incoming from the same basic block may be different is by
          // being different bitcasts of the same value. A cleanup
          // that remains TODO is changing findBaseOrBDV to return an
          // llvm::Value of the correct type (and still remain pure).
          // This will remove the need to add bitcasts.
          assert(Base->stripPointerCasts() == OldBase->stripPointerCasts() &&
                 "Sanity -- findBaseOrBDV should be pure!");
#endif
        }
        Value *Base = BlockToValue[InBB];
        BasePHI->setIncomingValue(i, Base);
      }
    } else if (SelectInst *BaseSI =
                   dyn_cast<SelectInst>(State.getBaseValue())) {
      SelectInst *SI = cast<SelectInst>(BDV);

      // Find the instruction which produces the base for each input.
      // We may need to insert a bitcast.
      BaseSI->setTrueValue(getBaseForInput(SI->getTrueValue(), BaseSI));
      BaseSI->setFalseValue(getBaseForInput(SI->getFalseValue(), BaseSI));
    } else if (auto *BaseEE =
                   dyn_cast<ExtractElementInst>(State.getBaseValue())) {
      Value *InVal = cast<ExtractElementInst>(BDV)->getVectorOperand();
      // Find the instruction which produces the base for each input. We may
      // need to insert a bitcast.
      BaseEE->setOperand(0, getBaseForInput(InVal, BaseEE));
    } else if (auto *BaseIE =
                   dyn_cast<InsertElementInst>(State.getBaseValue())) {
      auto *BdvIE = cast<InsertElementInst>(BDV);
      auto UpdateOperand = [&](int OperandIdx) {
        Value *InVal = BdvIE->getOperand(OperandIdx);
        Value *Base = getBaseForInput(InVal, BaseIE);
        BaseIE->setOperand(OperandIdx, Base);
      };
      UpdateOperand(0); // vector operand
      UpdateOperand(1); // scalar operand
    } else {
      auto *BaseSV = cast<ShuffleVectorInst>(State.getBaseValue());
      auto *BdvSV = cast<ShuffleVectorInst>(BDV);
      auto UpdateOperand = [&](int OperandIdx) {
        Value *InVal = BdvSV->getOperand(OperandIdx);
        Value *Base = getBaseForInput(InVal, BaseSV);
        BaseSV->setOperand(OperandIdx, Base);
      };
      UpdateOperand(0); // vector operand
      if (!BdvSV->isZeroEltSplat())
        UpdateOperand(1); // vector operand
      else {
        // Never read, so just use undef
        Value *InVal = BdvSV->getOperand(1);
        BaseSV->setOperand(1, UndefValue::get(InVal->getType()));
      }
    }
  }

#ifndef NDEBUG
  VerifyStates();
#endif

  // Cache all of our results so we can cheaply reuse them
  // NOTE: This is actually two caches: one of the base defining value
  // relation and one of the base pointer relation! FIXME
  for (auto Pair : States) {
    auto *BDV = Pair.first;
    Value *Base = Pair.second.getBaseValue();
    assert(BDV && Base);
    // Only values that do not have known bases or those that have differing
    // type (scalar versus vector) from a possible known base should be in the
    // lattice.
    assert((!isKnownBaseResult(BDV) || !areBothVectorOrScalar(BDV, Base)) &&
           "why did it get added?");

    LLVM_DEBUG(
        dbgs() << "Updating base value cache"
               << " for: " << BDV->getName() << " from: "
               << (Cache.count(BDV) ? Cache[BDV]->getName().str() : "none")
               << " to: " << Base->getName() << "\n");

    Cache[BDV] = Base;
  }
  assert(Cache.count(Def));
  return Cache[Def];
}

// For a set of live pointers (base and/or derived), identify the base
// pointer of the object which they are derived from. This routine will
// mutate the IR graph as needed to make the 'base' pointer live at the
// definition site of 'derived'. This ensures that any use of 'derived' can
// also use 'base'. This may involve the insertion of a number of
// additional PHI nodes.
//
// preconditions: live is a set of pointer type Values
//
// side effects: may insert PHI nodes into the existing CFG, will preserve
// CFG, will not remove or mutate any existing nodes
//
// post condition: PointerToBase contains one (derived, base) pair for every
// pointer in live. Note that derived can be equal to base if the original
// pointer was a base pointer.
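//
// Illustrative example: if %d = getelementptr i8, i8 addrspace(1)* %b, i64 8
// is live at the safepoint, the map gains {%d -> %b}; if %b is itself live,
// it also gains the identity pair {%b -> %b}.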
static void
findBasePointers(const StatepointLiveSetTy &live,
                 MapVector<Value *, Value *> &PointerToBase,
                 DominatorTree *DT, DefiningValueMapTy &DVCache) {
  for (Value *ptr : live) {
    Value *base = findBasePointer(ptr, DVCache);
    assert(base && "failed to find base pointer");
    PointerToBase[ptr] = base;
    assert((!isa<Instruction>(base) || !isa<Instruction>(ptr) ||
            DT->dominates(cast<Instruction>(base)->getParent(),
                          cast<Instruction>(ptr)->getParent())) &&
           "The base we found better dominate the derived pointer");
  }
}

/// Find the required base pointers (and adjust the live set) for the given
/// parse point.
static void findBasePointers(DominatorTree &DT, DefiningValueMapTy &DVCache,
                             CallBase *Call,
                             PartiallyConstructedSafepointRecord &result) {
  MapVector<Value *, Value *> PointerToBase;
  StatepointLiveSetTy PotentiallyDerivedPointers = result.LiveSet;
  // We assume that all pointers passed to deopt are base pointers; as an
  // optimization, we can use this to avoid separately materializing the base
  // pointer graph. This is only relevant since we're very conservative about
  // generating new conflict nodes during base pointer insertion. If we were
  // smarter there, this would be irrelevant.
  if (auto Opt = Call->getOperandBundle(LLVMContext::OB_deopt))
    for (Value *V : Opt->Inputs) {
      if (!PotentiallyDerivedPointers.count(V))
        continue;
      PotentiallyDerivedPointers.remove(V);
      PointerToBase[V] = V;
    }
  findBasePointers(PotentiallyDerivedPointers, PointerToBase, &DT, DVCache);

  if (PrintBasePointers) {
    errs() << "Base Pairs (w/o Relocation):\n";
    for (auto &Pair : PointerToBase) {
      errs() << " derived ";
      Pair.first->printAsOperand(errs(), false);
      errs() << " base ";
      Pair.second->printAsOperand(errs(), false);
      errs() << "\n";
    }
  }

  result.PointerToBase = PointerToBase;
}

/// Given an updated version of the dataflow liveness results, update the
/// liveset and base pointer maps for the call site CS.
static void recomputeLiveInValues(GCPtrLivenessData &RevisedLivenessData,
                                  CallBase *Call,
                                  PartiallyConstructedSafepointRecord &result);

static void recomputeLiveInValues(
    Function &F, DominatorTree &DT, ArrayRef<CallBase *> toUpdate,
    MutableArrayRef<struct PartiallyConstructedSafepointRecord> records) {
  // TODO-PERF: reuse the original liveness, then simply run the dataflow
  // again. The old values are still live and will help it stabilize quickly.
  GCPtrLivenessData RevisedLivenessData;
  computeLiveInValues(DT, F, RevisedLivenessData);
  for (size_t i = 0; i < records.size(); i++) {
    struct PartiallyConstructedSafepointRecord &info = records[i];
    recomputeLiveInValues(RevisedLivenessData, toUpdate[i], info);
  }
}

// When inserting gc.relocate and gc.result calls, we need to ensure there are
// no uses of the original value / return value between the gc.statepoint and
// the gc.relocate / gc.result call. One case which can arise is a phi node
// at the start of one of the successor blocks. We also need to be able to
// insert the gc.relocates only on the path which goes through the statepoint.
// We might need to split an edge to make this possible.
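//
// Sketch (illustrative): for an invoke in %bb whose normal destination %norm
// has another predecessor %other, the %bb -> %norm edge is split so relocates
// can be placed on the statepoint path only:
//   %bb -> %norm.split -> %norm <- %other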
1333 static BasicBlock *
normalizeForInvokeSafepoint(BasicBlock * BB,BasicBlock * InvokeParent,DominatorTree & DT)1334 normalizeForInvokeSafepoint(BasicBlock *BB, BasicBlock *InvokeParent,
1335 DominatorTree &DT) {
1336 BasicBlock *Ret = BB;
1337 if (!BB->getUniquePredecessor())
1338 Ret = SplitBlockPredecessors(BB, InvokeParent, "", &DT);
1339
1340 // Now that 'Ret' has unique predecessor we can safely remove all phi nodes
1341 // from it
1342 FoldSingleEntryPHINodes(Ret);
1343 assert(!isa<PHINode>(Ret->begin()) &&
1344 "All PHI nodes should have been removed!");
1345
1346 // At this point, we can safely insert a gc.relocate or gc.result as the first
1347 // instruction in Ret if needed.
1348 return Ret;
1349 }
1350
1351 // List of all function attributes which must be stripped when lowering from
1352 // abstract machine model to physical machine model. Essentially, these are
1353 // all the effects a safepoint might have which we ignored in the abstract
1354 // machine model for purposes of optimization. We have to strip these on
1355 // both function declarations and call sites.
1356 static constexpr Attribute::AttrKind FnAttrsToStrip[] =
1357 {Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly,
1358 Attribute::ArgMemOnly, Attribute::InaccessibleMemOnly,
1359 Attribute::InaccessibleMemOrArgMemOnly,
1360 Attribute::NoSync, Attribute::NoFree};
1361
1362 // List of all parameter and return attributes which must be stripped when
1363 // lowering from the abstract machine model. Note that we list attributes
1364 // here which aren't valid as return attributes; that is okay. There are
1365 // also some additional attributes with arguments which are handled
1366 // explicitly and are not in this list.
1367 static constexpr Attribute::AttrKind ParamAttrsToStrip[] =
1368 {Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly,
1369 Attribute::NoAlias, Attribute::NoFree};
1370
1371
1372 // Create new attribute set containing only attributes which can be transferred
1373 // from original call to the safepoint.
1374 static AttributeList legalizeCallAttributes(LLVMContext &Ctx,
1375 AttributeList AL) {
1376 if (AL.isEmpty())
1377 return AL;
1378
1379 // Remove the readonly, readnone, and statepoint function attributes.
1380 AttrBuilder FnAttrs = AL.getFnAttributes();
1381 for (auto Attr : FnAttrsToStrip)
1382 FnAttrs.removeAttribute(Attr);
1383
1384 for (Attribute A : AL.getFnAttributes()) {
1385 if (isStatepointDirectiveAttr(A))
1386 FnAttrs.remove(A);
1387 }
1388
1389 // Just skip parameter and return attributes for now
1390 return AttributeList::get(Ctx, AttributeList::FunctionIndex,
1391 AttributeSet::get(Ctx, FnAttrs));
1392 }
1393
1394 /// Helper function to place all gc relocates necessary for the given
1395 /// statepoint.
1396 /// Inputs:
1397 /// liveVariables - list of variables to be relocated.
1398 /// basePtrs - base pointers.
1399 /// statepointToken - statepoint instruction to which relocates should be
1400 /// bound.
1401 /// Builder - LLVM IR builder to be used to construct new calls.
1402 static void CreateGCRelocates(ArrayRef<Value *> LiveVariables,
1403 ArrayRef<Value *> BasePtrs,
1404 Instruction *StatepointToken,
1405 IRBuilder<> &Builder) {
1406 if (LiveVariables.empty())
1407 return;
1408
1409 auto FindIndex = [](ArrayRef<Value *> LiveVec, Value *Val) {
1410 auto ValIt = llvm::find(LiveVec, Val);
1411 assert(ValIt != LiveVec.end() && "Val not found in LiveVec!");
1412 size_t Index = std::distance(LiveVec.begin(), ValIt);
1413 assert(Index < LiveVec.size() && "Bug in std::find?");
1414 return Index;
1415 };
1416 Module *M = StatepointToken->getModule();
1417
1418 // All gc_relocate are generated as i8 addrspace(1)* (or a vector type whose
1419 // element type is i8 addrspace(1)*). We originally generated unique
1420 // declarations for each pointer type, but this proved problematic because
1421 // the intrinsic mangling code is incomplete and fragile. Since we're moving
1422   // towards a single unified pointer type anyway, we can just cast everything
1423 // to an i8* of the right address space. A bitcast is added later to convert
1424 // gc_relocate to the actual value's type.
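  // As a rough sketch of the resulting IR (names are illustrative only), a
  // live value of type %obj addrspace(1)* yields something like:
  //   %r = call coldcc i8 addrspace(1)*
  //          @llvm.experimental.gc.relocate.p1i8(token %tok, i32 %b, i32 %l)
  //   %r.cast = bitcast i8 addrspace(1)* %r to %obj addrspace(1)*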
1425 auto getGCRelocateDecl = [&] (Type *Ty) {
1426 assert(isHandledGCPointerType(Ty));
1427 auto AS = Ty->getScalarType()->getPointerAddressSpace();
1428 Type *NewTy = Type::getInt8PtrTy(M->getContext(), AS);
1429 if (auto *VT = dyn_cast<VectorType>(Ty))
1430 NewTy = FixedVectorType::get(NewTy,
1431 cast<FixedVectorType>(VT)->getNumElements());
1432 return Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_relocate,
1433 {NewTy});
1434 };
1435
1436 // Lazily populated map from input types to the canonicalized form mentioned
1437 // in the comment above. This should probably be cached somewhere more
1438 // broadly.
1439 DenseMap<Type *, Function *> TypeToDeclMap;
1440
1441 for (unsigned i = 0; i < LiveVariables.size(); i++) {
1442 // Generate the gc.relocate call and save the result
1443 Value *BaseIdx = Builder.getInt32(FindIndex(LiveVariables, BasePtrs[i]));
1444 Value *LiveIdx = Builder.getInt32(i);
1445
1446 Type *Ty = LiveVariables[i]->getType();
1447 if (!TypeToDeclMap.count(Ty))
1448 TypeToDeclMap[Ty] = getGCRelocateDecl(Ty);
1449 Function *GCRelocateDecl = TypeToDeclMap[Ty];
1450
1451 // only specify a debug name if we can give a useful one
1452 CallInst *Reloc = Builder.CreateCall(
1453 GCRelocateDecl, {StatepointToken, BaseIdx, LiveIdx},
1454 suffixed_name_or(LiveVariables[i], ".relocated", ""));
1455 // Trick CodeGen into thinking there are lots of free registers at this
1456 // fake call.
1457 Reloc->setCallingConv(CallingConv::Cold);
1458 }
1459 }
1460
1461 namespace {
1462
1463 /// This struct is used to defer RAUWs and `eraseFromParent`s. Using this
1464 /// avoids having to worry about keeping around dangling pointers to Values.
1465 class DeferredReplacement {
1466 AssertingVH<Instruction> Old;
1467 AssertingVH<Instruction> New;
1468 bool IsDeoptimize = false;
1469
1470 DeferredReplacement() = default;
1471
1472 public:
1473   static DeferredReplacement createRAUW(Instruction *Old, Instruction *New) {
1474 assert(Old != New && Old && New &&
1475 "Cannot RAUW equal values or to / from null!");
1476
1477 DeferredReplacement D;
1478 D.Old = Old;
1479 D.New = New;
1480 return D;
1481 }
1482
1483   static DeferredReplacement createDelete(Instruction *ToErase) {
1484 DeferredReplacement D;
1485 D.Old = ToErase;
1486 return D;
1487 }
1488
1489   static DeferredReplacement createDeoptimizeReplacement(Instruction *Old) {
1490 #ifndef NDEBUG
1491 auto *F = cast<CallInst>(Old)->getCalledFunction();
1492 assert(F && F->getIntrinsicID() == Intrinsic::experimental_deoptimize &&
1493 "Only way to construct a deoptimize deferred replacement");
1494 #endif
1495 DeferredReplacement D;
1496 D.Old = Old;
1497 D.IsDeoptimize = true;
1498 return D;
1499 }
1500
1501 /// Does the task represented by this instance.
1502   void doReplacement() {
1503 Instruction *OldI = Old;
1504 Instruction *NewI = New;
1505
1506 assert(OldI != NewI && "Disallowed at construction?!");
1507 assert((!IsDeoptimize || !New) &&
1508 "Deoptimize intrinsics are not replaced!");
1509
1510 Old = nullptr;
1511 New = nullptr;
1512
1513 if (NewI)
1514 OldI->replaceAllUsesWith(NewI);
1515
1516 if (IsDeoptimize) {
1517 // Note: we've inserted instructions, so the call to llvm.deoptimize may
1518 // not necessarily be followed by the matching return.
1519 auto *RI = cast<ReturnInst>(OldI->getParent()->getTerminator());
1520 new UnreachableInst(RI->getContext(), RI);
1521 RI->eraseFromParent();
1522 }
1523
1524 OldI->eraseFromParent();
1525 }
1526 };
1527
1528 } // end anonymous namespace
1529
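// Reads the optional "deopt-lowering" attribute from the call site or the
// callee and returns the requested strategy; "live-through" is the default.
// A hypothetical sketch of the IR form this inspects:
//   call void @foo() #0
//   attributes #0 = { "deopt-lowering"="live-in" }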
1530 static StringRef getDeoptLowering(CallBase *Call) {
1531 const char *DeoptLowering = "deopt-lowering";
1532 if (Call->hasFnAttr(DeoptLowering)) {
1533 // FIXME: Calls have a *really* confusing interface around attributes
1534 // with values.
1535 const AttributeList &CSAS = Call->getAttributes();
1536 if (CSAS.hasAttribute(AttributeList::FunctionIndex, DeoptLowering))
1537 return CSAS.getAttribute(AttributeList::FunctionIndex, DeoptLowering)
1538 .getValueAsString();
1539 Function *F = Call->getCalledFunction();
1540 assert(F && F->hasFnAttribute(DeoptLowering));
1541 return F->getFnAttribute(DeoptLowering).getValueAsString();
1542 }
1543 return "live-through";
1544 }
1545
1546 static void
1547 makeStatepointExplicitImpl(CallBase *Call, /* to replace */
1548 const SmallVectorImpl<Value *> &BasePtrs,
1549 const SmallVectorImpl<Value *> &LiveVariables,
1550 PartiallyConstructedSafepointRecord &Result,
1551 std::vector<DeferredReplacement> &Replacements) {
1552 assert(BasePtrs.size() == LiveVariables.size());
1553
1554   // Then go ahead and use the builder to actually do the inserts. We insert
1555 // immediately before the previous instruction under the assumption that all
1556 // arguments will be available here. We can't insert afterwards since we may
1557 // be replacing a terminator.
1558 IRBuilder<> Builder(Call);
1559
1560 ArrayRef<Value *> GCArgs(LiveVariables);
1561 uint64_t StatepointID = StatepointDirectives::DefaultStatepointID;
1562 uint32_t NumPatchBytes = 0;
1563 uint32_t Flags = uint32_t(StatepointFlags::None);
1564
1565 SmallVector<Value *, 8> CallArgs(Call->args());
1566 Optional<ArrayRef<Use>> DeoptArgs;
1567 if (auto Bundle = Call->getOperandBundle(LLVMContext::OB_deopt))
1568 DeoptArgs = Bundle->Inputs;
1569 Optional<ArrayRef<Use>> TransitionArgs;
1570 if (auto Bundle = Call->getOperandBundle(LLVMContext::OB_gc_transition)) {
1571 TransitionArgs = Bundle->Inputs;
1572 // TODO: This flag no longer serves a purpose and can be removed later
1573 Flags |= uint32_t(StatepointFlags::GCTransition);
1574 }
1575
1576 // Instead of lowering calls to @llvm.experimental.deoptimize as normal calls
1577   // with a return value, we lower them as never-returning calls to
1578 // __llvm_deoptimize that are followed by unreachable to get better codegen.
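  // Sketch of the shape of that rewrite (hypothetical operands):
  //   %v = call i32 (...) @llvm.experimental.deoptimize.i32() [ "deopt"(i32 0) ]
  //   ret i32 %v
  // becomes a gc.statepoint wrapping @__llvm_deoptimize, with the following
  // ret replaced by unreachable (see DeferredReplacement::doReplacement).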
1579 bool IsDeoptimize = false;
1580
1581 StatepointDirectives SD =
1582 parseStatepointDirectivesFromAttrs(Call->getAttributes());
1583 if (SD.NumPatchBytes)
1584 NumPatchBytes = *SD.NumPatchBytes;
1585 if (SD.StatepointID)
1586 StatepointID = *SD.StatepointID;
1587
1588 // Pass through the requested lowering if any. The default is live-through.
1589 StringRef DeoptLowering = getDeoptLowering(Call);
1590 if (DeoptLowering.equals("live-in"))
1591 Flags |= uint32_t(StatepointFlags::DeoptLiveIn);
1592 else {
1593 assert(DeoptLowering.equals("live-through") && "Unsupported value!");
1594 }
1595
1596 Value *CallTarget = Call->getCalledOperand();
1597 if (Function *F = dyn_cast<Function>(CallTarget)) {
1598 auto IID = F->getIntrinsicID();
1599 if (IID == Intrinsic::experimental_deoptimize) {
1600 // Calls to llvm.experimental.deoptimize are lowered to calls to the
1601 // __llvm_deoptimize symbol. We want to resolve this now, since the
1602 // verifier does not allow taking the address of an intrinsic function.
1603
1604 SmallVector<Type *, 8> DomainTy;
1605 for (Value *Arg : CallArgs)
1606 DomainTy.push_back(Arg->getType());
1607 auto *FTy = FunctionType::get(Type::getVoidTy(F->getContext()), DomainTy,
1608 /* isVarArg = */ false);
1609
1610 // Note: CallTarget can be a bitcast instruction of a symbol if there are
1611 // calls to @llvm.experimental.deoptimize with different argument types in
1612 // the same module. This is fine -- we assume the frontend knew what it
1613 // was doing when generating this kind of IR.
1614 CallTarget = F->getParent()
1615 ->getOrInsertFunction("__llvm_deoptimize", FTy)
1616 .getCallee();
1617
1618 IsDeoptimize = true;
1619 } else if (IID == Intrinsic::memcpy_element_unordered_atomic ||
1620 IID == Intrinsic::memmove_element_unordered_atomic) {
1621 // Unordered atomic memcpy and memmove intrinsics which are not explicitly
1622 // marked as "gc-leaf-function" should be lowered in a GC parseable way.
1623 // Specifically, these calls should be lowered to the
1624 // __llvm_{memcpy|memmove}_element_unordered_atomic_safepoint symbols.
1625 // Similarly to __llvm_deoptimize we want to resolve this now, since the
1626 // verifier does not allow taking the address of an intrinsic function.
1627 //
1628 // Moreover we need to shuffle the arguments for the call in order to
1629 // accommodate GC. The underlying source and destination objects might be
1630 // relocated during copy operation should the GC occur. To relocate the
1631 // derived source and destination pointers the implementation of the
1632 // intrinsic should know the corresponding base pointers.
1633 //
1634 // To make the base pointers available pass them explicitly as arguments:
1635 // memcpy(dest_derived, source_derived, ...) =>
1636 // memcpy(dest_base, dest_offset, source_base, source_offset, ...)
1637 auto &Context = Call->getContext();
1638 auto &DL = Call->getModule()->getDataLayout();
1639 auto GetBaseAndOffset = [&](Value *Derived) {
1640 assert(Result.PointerToBase.count(Derived));
1641 unsigned AddressSpace = Derived->getType()->getPointerAddressSpace();
1642 unsigned IntPtrSize = DL.getPointerSizeInBits(AddressSpace);
1643 Value *Base = Result.PointerToBase.find(Derived)->second;
1644 Value *Base_int = Builder.CreatePtrToInt(
1645 Base, Type::getIntNTy(Context, IntPtrSize));
1646 Value *Derived_int = Builder.CreatePtrToInt(
1647 Derived, Type::getIntNTy(Context, IntPtrSize));
1648 return std::make_pair(Base, Builder.CreateSub(Derived_int, Base_int));
1649 };
1650
1651 auto *Dest = CallArgs[0];
1652 Value *DestBase, *DestOffset;
1653 std::tie(DestBase, DestOffset) = GetBaseAndOffset(Dest);
1654
1655 auto *Source = CallArgs[1];
1656 Value *SourceBase, *SourceOffset;
1657 std::tie(SourceBase, SourceOffset) = GetBaseAndOffset(Source);
1658
1659 auto *LengthInBytes = CallArgs[2];
1660 auto *ElementSizeCI = cast<ConstantInt>(CallArgs[3]);
1661
1662 CallArgs.clear();
1663 CallArgs.push_back(DestBase);
1664 CallArgs.push_back(DestOffset);
1665 CallArgs.push_back(SourceBase);
1666 CallArgs.push_back(SourceOffset);
1667 CallArgs.push_back(LengthInBytes);
1668
1669 SmallVector<Type *, 8> DomainTy;
1670 for (Value *Arg : CallArgs)
1671 DomainTy.push_back(Arg->getType());
1672 auto *FTy = FunctionType::get(Type::getVoidTy(F->getContext()), DomainTy,
1673 /* isVarArg = */ false);
1674
1675 auto GetFunctionName = [](Intrinsic::ID IID, ConstantInt *ElementSizeCI) {
1676 uint64_t ElementSize = ElementSizeCI->getZExtValue();
1677 if (IID == Intrinsic::memcpy_element_unordered_atomic) {
1678 switch (ElementSize) {
1679 case 1:
1680 return "__llvm_memcpy_element_unordered_atomic_safepoint_1";
1681 case 2:
1682 return "__llvm_memcpy_element_unordered_atomic_safepoint_2";
1683 case 4:
1684 return "__llvm_memcpy_element_unordered_atomic_safepoint_4";
1685 case 8:
1686 return "__llvm_memcpy_element_unordered_atomic_safepoint_8";
1687 case 16:
1688 return "__llvm_memcpy_element_unordered_atomic_safepoint_16";
1689 default:
1690 llvm_unreachable("unexpected element size!");
1691 }
1692 }
1693 assert(IID == Intrinsic::memmove_element_unordered_atomic);
1694 switch (ElementSize) {
1695 case 1:
1696 return "__llvm_memmove_element_unordered_atomic_safepoint_1";
1697 case 2:
1698 return "__llvm_memmove_element_unordered_atomic_safepoint_2";
1699 case 4:
1700 return "__llvm_memmove_element_unordered_atomic_safepoint_4";
1701 case 8:
1702 return "__llvm_memmove_element_unordered_atomic_safepoint_8";
1703 case 16:
1704 return "__llvm_memmove_element_unordered_atomic_safepoint_16";
1705 default:
1706 llvm_unreachable("unexpected element size!");
1707 }
1708 };
1709
1710 CallTarget =
1711 F->getParent()
1712 ->getOrInsertFunction(GetFunctionName(IID, ElementSizeCI), FTy)
1713 .getCallee();
1714 }
1715 }
1716
1717 // Create the statepoint given all the arguments
1718 GCStatepointInst *Token = nullptr;
1719 if (auto *CI = dyn_cast<CallInst>(Call)) {
1720 CallInst *SPCall = Builder.CreateGCStatepointCall(
1721 StatepointID, NumPatchBytes, CallTarget, Flags, CallArgs,
1722 TransitionArgs, DeoptArgs, GCArgs, "safepoint_token");
1723
1724 SPCall->setTailCallKind(CI->getTailCallKind());
1725 SPCall->setCallingConv(CI->getCallingConv());
1726
1727     // Currently we will fail on parameter attributes and on certain
1728     // function attributes. For the subset we can handle, set up the
1729     // function attrs directly on the statepoint and attach the return
1730     // attrs later to the gc_result intrinsic.
1731 SPCall->setAttributes(
1732 legalizeCallAttributes(CI->getContext(), CI->getAttributes()));
1733
1734 Token = cast<GCStatepointInst>(SPCall);
1735
1736 // Put the following gc_result and gc_relocate calls immediately after the
1737     // old call (which we're about to delete)
1738 assert(CI->getNextNode() && "Not a terminator, must have next!");
1739 Builder.SetInsertPoint(CI->getNextNode());
1740 Builder.SetCurrentDebugLocation(CI->getNextNode()->getDebugLoc());
1741 } else {
1742 auto *II = cast<InvokeInst>(Call);
1743
1744 // Insert the new invoke into the old block. We'll remove the old one in a
1745 // moment at which point this will become the new terminator for the
1746 // original block.
1747 InvokeInst *SPInvoke = Builder.CreateGCStatepointInvoke(
1748 StatepointID, NumPatchBytes, CallTarget, II->getNormalDest(),
1749 II->getUnwindDest(), Flags, CallArgs, TransitionArgs, DeoptArgs, GCArgs,
1750 "statepoint_token");
1751
1752 SPInvoke->setCallingConv(II->getCallingConv());
1753
1754     // Currently we will fail on parameter attributes and on certain
1755     // function attributes. For the subset we can handle, set up the
1756     // function attrs directly on the statepoint and attach the return
1757     // attrs later to the gc_result intrinsic.
1758 SPInvoke->setAttributes(
1759 legalizeCallAttributes(II->getContext(), II->getAttributes()));
1760
1761 Token = cast<GCStatepointInst>(SPInvoke);
1762
1763 // Generate gc relocates in exceptional path
1764 BasicBlock *UnwindBlock = II->getUnwindDest();
1765 assert(!isa<PHINode>(UnwindBlock->begin()) &&
1766 UnwindBlock->getUniquePredecessor() &&
1767 "can't safely insert in this block!");
1768
1769 Builder.SetInsertPoint(&*UnwindBlock->getFirstInsertionPt());
1770 Builder.SetCurrentDebugLocation(II->getDebugLoc());
1771
1772 // Attach exceptional gc relocates to the landingpad.
1773 Instruction *ExceptionalToken = UnwindBlock->getLandingPadInst();
1774 Result.UnwindToken = ExceptionalToken;
1775
1776 CreateGCRelocates(LiveVariables, BasePtrs, ExceptionalToken, Builder);
1777
1778 // Generate gc relocates and returns for normal block
1779 BasicBlock *NormalDest = II->getNormalDest();
1780 assert(!isa<PHINode>(NormalDest->begin()) &&
1781 NormalDest->getUniquePredecessor() &&
1782 "can't safely insert in this block!");
1783
1784 Builder.SetInsertPoint(&*NormalDest->getFirstInsertionPt());
1785
1786     // gc relocates will be generated later as if it were a regular call
1787     // statepoint
1788 }
1789 assert(Token && "Should be set in one of the above branches!");
1790
1791 if (IsDeoptimize) {
1792 // If we're wrapping an @llvm.experimental.deoptimize in a statepoint, we
1793 // transform the tail-call like structure to a call to a void function
1794 // followed by unreachable to get better codegen.
1795 Replacements.push_back(
1796 DeferredReplacement::createDeoptimizeReplacement(Call));
1797 } else {
1798 Token->setName("statepoint_token");
1799 if (!Call->getType()->isVoidTy() && !Call->use_empty()) {
1800 StringRef Name = Call->hasName() ? Call->getName() : "";
1801 CallInst *GCResult = Builder.CreateGCResult(Token, Call->getType(), Name);
1802 GCResult->setAttributes(
1803 AttributeList::get(GCResult->getContext(), AttributeList::ReturnIndex,
1804 Call->getAttributes().getRetAttributes()));
1805
1806       // We cannot RAUW or delete the original call because it could be in the
1807 // live set of some other safepoint, in which case that safepoint's
1808 // PartiallyConstructedSafepointRecord will hold a raw pointer to this
1809 // llvm::Instruction. Instead, we defer the replacement and deletion to
1810 // after the live sets have been made explicit in the IR, and we no longer
1811 // have raw pointers to worry about.
1812 Replacements.emplace_back(
1813 DeferredReplacement::createRAUW(Call, GCResult));
1814 } else {
1815 Replacements.emplace_back(DeferredReplacement::createDelete(Call));
1816 }
1817 }
1818
1819 Result.StatepointToken = Token;
1820
1821 // Second, create a gc.relocate for every live variable
1822 CreateGCRelocates(LiveVariables, BasePtrs, Token, Builder);
1823 }
1824
1825 // Replace an existing gc.statepoint with a new one and a set of gc.relocates
1826 // which make the relocations happening at this safepoint explicit.
1827 //
1828 // WARNING: Does not do any fixup to adjust users of the original live
1829 // values. That's the caller's responsibility.
1830 static void
1831 makeStatepointExplicit(DominatorTree &DT, CallBase *Call,
1832 PartiallyConstructedSafepointRecord &Result,
1833 std::vector<DeferredReplacement> &Replacements) {
1834 const auto &LiveSet = Result.LiveSet;
1835 const auto &PointerToBase = Result.PointerToBase;
1836
1837 // Convert to vector for efficient cross referencing.
1838 SmallVector<Value *, 64> BaseVec, LiveVec;
1839 LiveVec.reserve(LiveSet.size());
1840 BaseVec.reserve(LiveSet.size());
1841 for (Value *L : LiveSet) {
1842 LiveVec.push_back(L);
1843 assert(PointerToBase.count(L));
1844 Value *Base = PointerToBase.find(L)->second;
1845 BaseVec.push_back(Base);
1846 }
1847 assert(LiveVec.size() == BaseVec.size());
1848
1849 // Do the actual rewriting and delete the old statepoint
1850 makeStatepointExplicitImpl(Call, BaseVec, LiveVec, Result, Replacements);
1851 }
1852
1853 // Helper function for the relocationViaAlloca.
1854 //
1855 // It receives an iterator over the statepoint gc relocates and emits a store
1856 // to the assigned location (via AllocaMap) for each one of them. It adds the
1857 // visited values into the VisitedLiveValues set, which we will later use
1858 // for sanity checking.
1859 static void
1860 insertRelocationStores(iterator_range<Value::user_iterator> GCRelocs,
1861 DenseMap<Value *, AllocaInst *> &AllocaMap,
1862 DenseSet<Value *> &VisitedLiveValues) {
1863 for (User *U : GCRelocs) {
1864 GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U);
1865 if (!Relocate)
1866 continue;
1867
1868 Value *OriginalValue = Relocate->getDerivedPtr();
1869 assert(AllocaMap.count(OriginalValue));
1870 Value *Alloca = AllocaMap[OriginalValue];
1871
1872 // Emit store into the related alloca
1873     // All gc_relocates are i8 addrspace(1)* typed, so the value must be bitcast
1874     // to the correct type according to the alloca.
1875 assert(Relocate->getNextNode() &&
1876 "Should always have one since it's not a terminator");
1877 IRBuilder<> Builder(Relocate->getNextNode());
1878 Value *CastedRelocatedValue =
1879 Builder.CreateBitCast(Relocate,
1880 cast<AllocaInst>(Alloca)->getAllocatedType(),
1881 suffixed_name_or(Relocate, ".casted", ""));
1882
1883 new StoreInst(CastedRelocatedValue, Alloca,
1884 cast<Instruction>(CastedRelocatedValue)->getNextNode());
1885
1886 #ifndef NDEBUG
1887 VisitedLiveValues.insert(OriginalValue);
1888 #endif
1889 }
1890 }
1891
1892 // Helper function for the "relocationViaAlloca". Similar to the
1893 // "insertRelocationStores" but works for rematerialized values.
1894 static void insertRematerializationStores(
1895 const RematerializedValueMapTy &RematerializedValues,
1896 DenseMap<Value *, AllocaInst *> &AllocaMap,
1897 DenseSet<Value *> &VisitedLiveValues) {
1898 for (auto RematerializedValuePair: RematerializedValues) {
1899 Instruction *RematerializedValue = RematerializedValuePair.first;
1900 Value *OriginalValue = RematerializedValuePair.second;
1901
1902 assert(AllocaMap.count(OriginalValue) &&
1903 "Can not find alloca for rematerialized value");
1904 Value *Alloca = AllocaMap[OriginalValue];
1905
1906 new StoreInst(RematerializedValue, Alloca,
1907 RematerializedValue->getNextNode());
1908
1909 #ifndef NDEBUG
1910 VisitedLiveValues.insert(OriginalValue);
1911 #endif
1912 }
1913 }
1914
1915 /// Do all the relocation update via allocas and mem2reg
1916 static void relocationViaAlloca(
1917 Function &F, DominatorTree &DT, ArrayRef<Value *> Live,
1918 ArrayRef<PartiallyConstructedSafepointRecord> Records) {
1919 #ifndef NDEBUG
1920 // record initial number of (static) allocas; we'll check we have the same
1921 // number when we get done.
1922 int InitialAllocaNum = 0;
1923 for (Instruction &I : F.getEntryBlock())
1924 if (isa<AllocaInst>(I))
1925 InitialAllocaNum++;
1926 #endif
1927
1928 // TODO-PERF: change data structures, reserve
1929 DenseMap<Value *, AllocaInst *> AllocaMap;
1930 SmallVector<AllocaInst *, 200> PromotableAllocas;
1931   // Used later to check that we have enough allocas to store all values
1932 std::size_t NumRematerializedValues = 0;
1933 PromotableAllocas.reserve(Live.size());
1934
1935   // Emit an alloca for "LiveValue" and record it in "AllocaMap" and
1936   // "PromotableAllocas"
1937 const DataLayout &DL = F.getParent()->getDataLayout();
1938 auto emitAllocaFor = [&](Value *LiveValue) {
1939 AllocaInst *Alloca = new AllocaInst(LiveValue->getType(),
1940 DL.getAllocaAddrSpace(), "",
1941 F.getEntryBlock().getFirstNonPHI());
1942 AllocaMap[LiveValue] = Alloca;
1943 PromotableAllocas.push_back(Alloca);
1944 };
1945
1946 // Emit alloca for each live gc pointer
1947 for (Value *V : Live)
1948 emitAllocaFor(V);
1949
1950 // Emit allocas for rematerialized values
1951 for (const auto &Info : Records)
1952 for (auto RematerializedValuePair : Info.RematerializedValues) {
1953 Value *OriginalValue = RematerializedValuePair.second;
1954 if (AllocaMap.count(OriginalValue) != 0)
1955 continue;
1956
1957 emitAllocaFor(OriginalValue);
1958 ++NumRematerializedValues;
1959 }
1960
1961 // The next two loops are part of the same conceptual operation. We need to
1962 // insert a store to the alloca after the original def and at each
1963 // redefinition. We need to insert a load before each use. These are split
1964 // into distinct loops for performance reasons.
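  // Sketch of the rewrite for a single value %v (illustrative names):
  //   %v.alloca = alloca i8 addrspace(1)*   ; in the entry block
  //   store %v, %v.alloca                   ; after the def of %v
  //   store %v.relocated, %v.alloca         ; after each statepoint
  //   %v.reload = load %v.alloca            ; before each use of %v
  // mem2reg later promotes these allocas back into SSA form.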
1965
1966 // Update gc pointer after each statepoint: either store a relocated value or
1967 // null (if no relocated value was found for this gc pointer and it is not a
1968   // gc_result). This must happen before we update the statepoint with a load of
1969   // the alloca, otherwise we lose the link between the statepoint and the old def.
1970 for (const auto &Info : Records) {
1971 Value *Statepoint = Info.StatepointToken;
1972
1973 // This will be used for consistency check
1974 DenseSet<Value *> VisitedLiveValues;
1975
1976 // Insert stores for normal statepoint gc relocates
1977 insertRelocationStores(Statepoint->users(), AllocaMap, VisitedLiveValues);
1978
1979     // If it was an invoke statepoint,
1980     // we insert stores for the exceptional path gc relocates.
1981 if (isa<InvokeInst>(Statepoint)) {
1982 insertRelocationStores(Info.UnwindToken->users(), AllocaMap,
1983 VisitedLiveValues);
1984 }
1985
1986     // Do a similar thing for the rematerialized values
1987 insertRematerializationStores(Info.RematerializedValues, AllocaMap,
1988 VisitedLiveValues);
1989
1990 if (ClobberNonLive) {
1991 // As a debugging aid, pretend that an unrelocated pointer becomes null at
1992 // the gc.statepoint. This will turn some subtle GC problems into
1993 // slightly easier to debug SEGVs. Note that on large IR files with
1994       // lots of gc.statepoints this is extremely costly in both memory and
1995       // time.
1996 SmallVector<AllocaInst *, 64> ToClobber;
1997 for (auto Pair : AllocaMap) {
1998 Value *Def = Pair.first;
1999 AllocaInst *Alloca = Pair.second;
2000
2001 // This value was relocated
2002 if (VisitedLiveValues.count(Def)) {
2003 continue;
2004 }
2005 ToClobber.push_back(Alloca);
2006 }
2007
2008 auto InsertClobbersAt = [&](Instruction *IP) {
2009 for (auto *AI : ToClobber) {
2010 auto PT = cast<PointerType>(AI->getAllocatedType());
2011 Constant *CPN = ConstantPointerNull::get(PT);
2012 new StoreInst(CPN, AI, IP);
2013 }
2014 };
2015
2016 // Insert the clobbering stores. These may get intermixed with the
2017 // gc.results and gc.relocates, but that's fine.
2018 if (auto II = dyn_cast<InvokeInst>(Statepoint)) {
2019 InsertClobbersAt(&*II->getNormalDest()->getFirstInsertionPt());
2020 InsertClobbersAt(&*II->getUnwindDest()->getFirstInsertionPt());
2021 } else {
2022 InsertClobbersAt(cast<Instruction>(Statepoint)->getNextNode());
2023 }
2024 }
2025 }
2026
2027   // Update the uses with loads from the allocas and add a store for the initial def.
2028 for (auto Pair : AllocaMap) {
2029 Value *Def = Pair.first;
2030 AllocaInst *Alloca = Pair.second;
2031
2032     // We pre-record the uses of allocas so that we don't have to worry about
2033     // later updates that change the user information.
2034
2035 SmallVector<Instruction *, 20> Uses;
2036     // PERF: trade a linear scan (counting uses) to avoid repeated reallocation
2037 Uses.reserve(Def->getNumUses());
2038 for (User *U : Def->users()) {
2039 if (!isa<ConstantExpr>(U)) {
2040 // If the def has a ConstantExpr use, then the def is either a
2041 // ConstantExpr use itself or null. In either case
2042 // (recursively in the first, directly in the second), the oop
2043 // it is ultimately dependent on is null and this particular
2044 // use does not need to be fixed up.
2045 Uses.push_back(cast<Instruction>(U));
2046 }
2047 }
2048
2049 llvm::sort(Uses);
2050 auto Last = std::unique(Uses.begin(), Uses.end());
2051 Uses.erase(Last, Uses.end());
2052
2053 for (Instruction *Use : Uses) {
2054 if (isa<PHINode>(Use)) {
2055 PHINode *Phi = cast<PHINode>(Use);
2056 for (unsigned i = 0; i < Phi->getNumIncomingValues(); i++) {
2057 if (Def == Phi->getIncomingValue(i)) {
2058 LoadInst *Load =
2059 new LoadInst(Alloca->getAllocatedType(), Alloca, "",
2060 Phi->getIncomingBlock(i)->getTerminator());
2061 Phi->setIncomingValue(i, Load);
2062 }
2063 }
2064 } else {
2065 LoadInst *Load =
2066 new LoadInst(Alloca->getAllocatedType(), Alloca, "", Use);
2067 Use->replaceUsesOfWith(Def, Load);
2068 }
2069 }
2070
2071     // Emit store for the initial gc value. The store must be inserted after
2072     // the load, otherwise the store will be in the alloca's use list and an
2073     // extra load will be inserted before it.
2074 StoreInst *Store = new StoreInst(Def, Alloca, /*volatile*/ false,
2075 DL.getABITypeAlign(Def->getType()));
2076 if (Instruction *Inst = dyn_cast<Instruction>(Def)) {
2077 if (InvokeInst *Invoke = dyn_cast<InvokeInst>(Inst)) {
2078         // InvokeInst is a terminator so the store needs to be inserted into its
2079 // normal destination block.
2080 BasicBlock *NormalDest = Invoke->getNormalDest();
2081 Store->insertBefore(NormalDest->getFirstNonPHI());
2082 } else {
2083 assert(!Inst->isTerminator() &&
2084 "The only terminator that can produce a value is "
2085 "InvokeInst which is handled above.");
2086 Store->insertAfter(Inst);
2087 }
2088 } else {
2089 assert(isa<Argument>(Def));
2090 Store->insertAfter(cast<Instruction>(Alloca));
2091 }
2092 }
2093
2094 assert(PromotableAllocas.size() == Live.size() + NumRematerializedValues &&
2095 "we must have the same allocas with lives");
2096 if (!PromotableAllocas.empty()) {
2097 // Apply mem2reg to promote alloca to SSA
2098 PromoteMemToReg(PromotableAllocas, DT);
2099 }
2100
2101 #ifndef NDEBUG
2102 for (auto &I : F.getEntryBlock())
2103 if (isa<AllocaInst>(I))
2104 InitialAllocaNum--;
2105 assert(InitialAllocaNum == 0 && "We must not introduce any extra allocas");
2106 #endif
2107 }
2108
2109 /// Implement a unique function which doesn't require us to sort the input
2110 /// vector. Doing so has the effect of changing the output of a couple of
2111 /// tests in ways which make them less useful in testing fused safepoints.
2112 template <typename T> static void unique_unsorted(SmallVectorImpl<T> &Vec) {
2113 SmallSet<T, 8> Seen;
2114 erase_if(Vec, [&](const T &V) { return !Seen.insert(V).second; });
2115 }
2116
2117 /// Insert holders so that each Value is obviously live through the entire
2118 /// lifetime of the call.
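/// As a sketch, for values %a and %b this emits (names illustrative):
///   call void (...) @__tmp_use(i8 addrspace(1)* %a, i8 addrspace(1)* %b)
/// right after the safepoint, forcing liveness to keep %a and %b live
/// across it until the holders are removed.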
2119 static void insertUseHolderAfter(CallBase *Call, const ArrayRef<Value *> Values,
2120 SmallVectorImpl<CallInst *> &Holders) {
2121 if (Values.empty())
2122 // No values to hold live, might as well not insert the empty holder
2123 return;
2124
2125 Module *M = Call->getModule();
2126 // Use a dummy vararg function to actually hold the values live
2127 FunctionCallee Func = M->getOrInsertFunction(
2128 "__tmp_use", FunctionType::get(Type::getVoidTy(M->getContext()), true));
2129 if (isa<CallInst>(Call)) {
2130 // For call safepoints insert dummy calls right after safepoint
2131 Holders.push_back(
2132 CallInst::Create(Func, Values, "", &*++Call->getIterator()));
2133 return;
2134 }
2135   // For invoke safepoints insert dummy calls both in normal and
2136 // exceptional destination blocks
2137 auto *II = cast<InvokeInst>(Call);
2138 Holders.push_back(CallInst::Create(
2139 Func, Values, "", &*II->getNormalDest()->getFirstInsertionPt()));
2140 Holders.push_back(CallInst::Create(
2141 Func, Values, "", &*II->getUnwindDest()->getFirstInsertionPt()));
2142 }
2143
2144 static void findLiveReferences(
2145 Function &F, DominatorTree &DT, ArrayRef<CallBase *> toUpdate,
2146 MutableArrayRef<struct PartiallyConstructedSafepointRecord> records) {
2147 GCPtrLivenessData OriginalLivenessData;
2148 computeLiveInValues(DT, F, OriginalLivenessData);
2149 for (size_t i = 0; i < records.size(); i++) {
2150 struct PartiallyConstructedSafepointRecord &info = records[i];
2151 analyzeParsePointLiveness(DT, OriginalLivenessData, toUpdate[i], info);
2152 }
2153 }
2154
2155 // Helper function for the "rematerializeLiveValues". It walks the use chain
2156 // starting from the "CurrentValue" until it reaches the root of the chain, i.e.
2157 // the base or a value it cannot process. Only "simple" values are processed
2158 // (currently GEPs and casts). The returned root is examined by the
2159 // callers of findRematerializableChainToBasePointer. Fills "ChainToBase" array
2160 // with all visited values.
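// Sketch of a rematerializable chain (hypothetical IR):
//   %root = ...                       ; the returned root, not pushed
//   %cast = bitcast ... %root ...     ; no-op cast, pushed second
//   %gep  = getelementptr ... %cast   ; pushed first
// Walking from %gep fills ChainToBase = {%gep, %cast} and returns %root.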
2161 static Value* findRematerializableChainToBasePointer(
2162 SmallVectorImpl<Instruction*> &ChainToBase,
2163 Value *CurrentValue) {
2164 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurrentValue)) {
2165 ChainToBase.push_back(GEP);
2166 return findRematerializableChainToBasePointer(ChainToBase,
2167 GEP->getPointerOperand());
2168 }
2169
2170 if (CastInst *CI = dyn_cast<CastInst>(CurrentValue)) {
2171 if (!CI->isNoopCast(CI->getModule()->getDataLayout()))
2172 return CI;
2173
2174 ChainToBase.push_back(CI);
2175 return findRematerializableChainToBasePointer(ChainToBase,
2176 CI->getOperand(0));
2177 }
2178
2179 // We have reached the root of the chain, which is either equal to the base or
2180 // is the first unsupported value along the use chain.
2181 return CurrentValue;
2182 }
2183
2184 // Helper function for the "rematerializeLiveValues". Compute cost of the use
2185 // chain we are going to rematerialize.
2186 static InstructionCost
2187 chainToBasePointerCost(SmallVectorImpl<Instruction *> &Chain,
2188 TargetTransformInfo &TTI) {
2189 InstructionCost Cost = 0;
2190
2191 for (Instruction *Instr : Chain) {
2192 if (CastInst *CI = dyn_cast<CastInst>(Instr)) {
2193 assert(CI->isNoopCast(CI->getModule()->getDataLayout()) &&
2194 "non noop cast is found during rematerialization");
2195
2196 Type *SrcTy = CI->getOperand(0)->getType();
2197 Cost += TTI.getCastInstrCost(CI->getOpcode(), CI->getType(), SrcTy,
2198 TTI::getCastContextHint(CI),
2199 TargetTransformInfo::TCK_SizeAndLatency, CI);
2200
2201 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Instr)) {
2202 // Cost of the address calculation
2203 Type *ValTy = GEP->getSourceElementType();
2204 Cost += TTI.getAddressComputationCost(ValTy);
2205
2206 // And cost of the GEP itself
2207 // TODO: Use TTI->getGEPCost here (it exists, but appears to be not
2208 // allowed for the external usage)
2209 if (!GEP->hasAllConstantIndices())
2210 Cost += 2;
2211
2212 } else {
2213 llvm_unreachable("unsupported instruction type during rematerialization");
2214 }
2215 }
2216
2217 return Cost;
2218 }
2219
2220 static bool AreEquivalentPhiNodes(PHINode &OrigRootPhi, PHINode &AlternateRootPhi) {
2221 unsigned PhiNum = OrigRootPhi.getNumIncomingValues();
2222 if (PhiNum != AlternateRootPhi.getNumIncomingValues() ||
2223 OrigRootPhi.getParent() != AlternateRootPhi.getParent())
2224 return false;
2225 // Map of incoming values and their corresponding basic blocks of
2226 // OrigRootPhi.
2227 SmallDenseMap<Value *, BasicBlock *, 8> CurrentIncomingValues;
2228 for (unsigned i = 0; i < PhiNum; i++)
2229 CurrentIncomingValues[OrigRootPhi.getIncomingValue(i)] =
2230 OrigRootPhi.getIncomingBlock(i);
2231
2232 // Both current and base PHIs should have same incoming values and
2233 // the same basic blocks corresponding to the incoming values.
2234 for (unsigned i = 0; i < PhiNum; i++) {
2235 auto CIVI =
2236 CurrentIncomingValues.find(AlternateRootPhi.getIncomingValue(i));
2237 if (CIVI == CurrentIncomingValues.end())
2238 return false;
2239 BasicBlock *CurrentIncomingBB = CIVI->second;
2240 if (CurrentIncomingBB != AlternateRootPhi.getIncomingBlock(i))
2241 return false;
2242 }
2243 return true;
2244 }
2245
2246 // From the statepoint live set pick values that are cheaper to recompute than
2247 // to relocate. Remove these values from the live set, rematerialize them after
2248 // the statepoint and record them in the "Info" structure. Note that similar to
2249 // relocated values we don't do any user adjustments here.
2250 static void rematerializeLiveValues(CallBase *Call,
2251 PartiallyConstructedSafepointRecord &Info,
2252 TargetTransformInfo &TTI) {
2253 const unsigned int ChainLengthThreshold = 10;
2254
2255 // Record values we are going to delete from this statepoint live set.
2256   // We cannot do this in the following loop due to iterator invalidation.
2257 SmallVector<Value *, 32> LiveValuesToBeDeleted;
2258
2259 for (Value *LiveValue: Info.LiveSet) {
2260 // For each live pointer find its defining chain
2261 SmallVector<Instruction *, 3> ChainToBase;
2262 assert(Info.PointerToBase.count(LiveValue));
2263 Value *RootOfChain =
2264 findRematerializableChainToBasePointer(ChainToBase,
2265 LiveValue);
2266
2267 // Nothing to do, or chain is too long
2268     if (ChainToBase.size() == 0 ||
2269 ChainToBase.size() > ChainLengthThreshold)
2270 continue;
2271
2272 // Handle the scenario where the RootOfChain is not equal to the
2273 // Base Value, but they are essentially the same phi values.
2274 if (RootOfChain != Info.PointerToBase[LiveValue]) {
2275 PHINode *OrigRootPhi = dyn_cast<PHINode>(RootOfChain);
2276 PHINode *AlternateRootPhi = dyn_cast<PHINode>(Info.PointerToBase[LiveValue]);
2277 if (!OrigRootPhi || !AlternateRootPhi)
2278 continue;
2279       // PHI nodes that have the same incoming values, and belong to the same
2280       // basic block, are essentially the same SSA value. When the original phi
2281       // has incoming values with different base pointers, the original phi is
2282       // marked as conflict, and an additional `AlternateRootPhi` with the same
2283       // incoming values gets generated by the findBasePointer function. We need
2284       // to verify that the newly generated AlternateRootPhi (.base version of phi)
2285       // and RootOfChain (the original phi node itself) are the same, so that we
2286 // can rematerialize the gep and casts. This is a workaround for the
2287 // deficiency in the findBasePointer algorithm.
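      // Sketch: the original phi and the generated ".base" phi both look like
      //   %phi      = phi i8 addrspace(1)* [ %p, %bb1 ], [ %q, %bb2 ]
      //   %phi.base = phi i8 addrspace(1)* [ %p, %bb1 ], [ %q, %bb2 ]
      // (illustrative names); AreEquivalentPhiNodes treats them as the same value.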
2288 if (!AreEquivalentPhiNodes(*OrigRootPhi, *AlternateRootPhi))
2289 continue;
2290 // Now that the phi nodes are proved to be the same, assert that
2291 // findBasePointer's newly generated AlternateRootPhi is present in the
2292 // liveset of the call.
2293 assert(Info.LiveSet.count(AlternateRootPhi));
2294 }
2295 // Compute cost of this chain
2296 InstructionCost Cost = chainToBasePointerCost(ChainToBase, TTI);
2297 // TODO: We can also account for cases when we will be able to remove some
2298     // of the rematerialized values by later optimization passes, e.g. if
2299     // we rematerialized several intersecting chains, or if the original values
2300     // don't have any uses besides this statepoint.
2301
2302 // For invokes we need to rematerialize each chain twice - for normal and
2303 // for unwind basic blocks. Model this by multiplying cost by two.
2304 if (isa<InvokeInst>(Call)) {
2305 Cost *= 2;
2306 }
2307 // If it's too expensive - skip it
2308 if (Cost >= RematerializationThreshold)
2309 continue;
2310
2311 // Remove value from the live set
2312 LiveValuesToBeDeleted.push_back(LiveValue);
2313
2314 // Clone instructions and record them inside "Info" structure
2315
2316 // Walk backwards to visit top-most instructions first
2317 std::reverse(ChainToBase.begin(), ChainToBase.end());
2318
2319 // Utility function which clones all instructions from "ChainToBase"
2320 // and inserts them before "InsertBefore". Returns rematerialized value
2321 // which should be used after statepoint.
2322 auto rematerializeChain = [&ChainToBase](
2323 Instruction *InsertBefore, Value *RootOfChain, Value *AlternateLiveBase) {
2324 Instruction *LastClonedValue = nullptr;
2325 Instruction *LastValue = nullptr;
2326 for (Instruction *Instr: ChainToBase) {
2327 // Only GEP's and casts are supported as we need to be careful to not
2328 // introduce any new uses of pointers not in the liveset.
2329 // Note that it's fine to introduce new uses of pointers which were
2330 // otherwise not used after this statepoint.
2331 assert(isa<GetElementPtrInst>(Instr) || isa<CastInst>(Instr));
2332
2333 Instruction *ClonedValue = Instr->clone();
2334 ClonedValue->insertBefore(InsertBefore);
2335 ClonedValue->setName(Instr->getName() + ".remat");
2336
2337 // If it is not first instruction in the chain then it uses previously
2338 // cloned value. We should update it to use cloned value.
2339 if (LastClonedValue) {
2340 assert(LastValue);
2341 ClonedValue->replaceUsesOfWith(LastValue, LastClonedValue);
2342 #ifndef NDEBUG
2343 for (auto OpValue : ClonedValue->operand_values()) {
2344 // Assert that cloned instruction does not use any instructions from
2345 // this chain other than LastClonedValue
2346 assert(!is_contained(ChainToBase, OpValue) &&
2347 "incorrect use in rematerialization chain");
2348 // Assert that the cloned instruction does not use the RootOfChain
2349 // or the AlternateLiveBase.
2350 assert(OpValue != RootOfChain && OpValue != AlternateLiveBase);
2351 }
2352 #endif
2353 } else {
2354 // For the first instruction, replace the use of unrelocated base i.e.
2355 // RootOfChain/OrigRootPhi, with the corresponding PHI present in the
2356 // live set. They have been proved to be the same PHI nodes. Note
2357 // that the *only* use of the RootOfChain in the ChainToBase list is
2358 // the first Value in the list.
2359 if (RootOfChain != AlternateLiveBase)
2360 ClonedValue->replaceUsesOfWith(RootOfChain, AlternateLiveBase);
2361 }
2362
2363 LastClonedValue = ClonedValue;
2364 LastValue = Instr;
2365 }
2366 assert(LastClonedValue);
2367 return LastClonedValue;
2368 };
2369
2370 // Different cases for calls and invokes. For invokes we need to clone
2371 // instructions both on normal and unwind path.
2372 if (isa<CallInst>(Call)) {
2373 Instruction *InsertBefore = Call->getNextNode();
2374 assert(InsertBefore);
2375 Instruction *RematerializedValue = rematerializeChain(
2376 InsertBefore, RootOfChain, Info.PointerToBase[LiveValue]);
2377 Info.RematerializedValues[RematerializedValue] = LiveValue;
2378 } else {
2379 auto *Invoke = cast<InvokeInst>(Call);
2380
2381 Instruction *NormalInsertBefore =
2382 &*Invoke->getNormalDest()->getFirstInsertionPt();
2383 Instruction *UnwindInsertBefore =
2384 &*Invoke->getUnwindDest()->getFirstInsertionPt();
2385
2386 Instruction *NormalRematerializedValue = rematerializeChain(
2387 NormalInsertBefore, RootOfChain, Info.PointerToBase[LiveValue]);
2388 Instruction *UnwindRematerializedValue = rematerializeChain(
2389 UnwindInsertBefore, RootOfChain, Info.PointerToBase[LiveValue]);
2390
2391 Info.RematerializedValues[NormalRematerializedValue] = LiveValue;
2392 Info.RematerializedValues[UnwindRematerializedValue] = LiveValue;
2393 }
2394 }
2395
2396   // Remove rematerialized values from the live set
2397 for (auto LiveValue: LiveValuesToBeDeleted) {
2398 Info.LiveSet.remove(LiveValue);
2399 }
2400 }
2401
2402 static bool inlineGetBaseAndOffset(Function &F,
2403 SmallVectorImpl<CallInst *> &Intrinsics,
2404 DefiningValueMapTy &DVCache) {
2405 auto &Context = F.getContext();
2406 auto &DL = F.getParent()->getDataLayout();
2407 bool Changed = false;
2408
2409 for (auto *Callsite : Intrinsics)
2410 switch (Callsite->getIntrinsicID()) {
2411 case Intrinsic::experimental_gc_get_pointer_base: {
2412 Changed = true;
2413 Value *Base = findBasePointer(Callsite->getOperand(0), DVCache);
2414 assert(!DVCache.count(Callsite));
2415 auto *BaseBC = IRBuilder<>(Callsite).CreateBitCast(
2416 Base, Callsite->getType(), suffixed_name_or(Base, ".cast", ""));
2417 if (BaseBC != Base)
2418 DVCache[BaseBC] = Base;
2419 Callsite->replaceAllUsesWith(BaseBC);
2420 if (!BaseBC->hasName())
2421 BaseBC->takeName(Callsite);
2422 Callsite->eraseFromParent();
2423 break;
2424 }
2425 case Intrinsic::experimental_gc_get_pointer_offset: {
2426 Changed = true;
2427 Value *Derived = Callsite->getOperand(0);
2428 Value *Base = findBasePointer(Derived, DVCache);
2429 assert(!DVCache.count(Callsite));
2430 unsigned AddressSpace = Derived->getType()->getPointerAddressSpace();
2431 unsigned IntPtrSize = DL.getPointerSizeInBits(AddressSpace);
2432 IRBuilder<> Builder(Callsite);
2433 Value *BaseInt =
2434 Builder.CreatePtrToInt(Base, Type::getIntNTy(Context, IntPtrSize),
2435 suffixed_name_or(Base, ".int", ""));
2436 Value *DerivedInt =
2437 Builder.CreatePtrToInt(Derived, Type::getIntNTy(Context, IntPtrSize),
2438 suffixed_name_or(Derived, ".int", ""));
2439 Value *Offset = Builder.CreateSub(DerivedInt, BaseInt);
2440 Callsite->replaceAllUsesWith(Offset);
2441 Offset->takeName(Callsite);
2442 Callsite->eraseFromParent();
2443 break;
2444 }
2445 default:
2446 llvm_unreachable("Unknown intrinsic");
2447 }
2448
2449 return Changed;
2450 }
2451
2452 static bool insertParsePoints(Function &F, DominatorTree &DT,
2453 TargetTransformInfo &TTI,
2454 SmallVectorImpl<CallBase *> &ToUpdate,
2455 DefiningValueMapTy &DVCache) {
2456 #ifndef NDEBUG
2457 // sanity check the input
2458 std::set<CallBase *> Uniqued;
2459 Uniqued.insert(ToUpdate.begin(), ToUpdate.end());
2460 assert(Uniqued.size() == ToUpdate.size() && "no duplicates please!");
2461
2462 for (CallBase *Call : ToUpdate)
2463 assert(Call->getFunction() == &F);
2464 #endif
2465
2466 // When inserting gc.relocates for invokes, we need to be able to insert at
2467 // the top of the successor blocks. See the comment on
2468   // normalizeForInvokeSafepoint on exactly what is needed. Note that this step
2469 // may restructure the CFG.
2470 for (CallBase *Call : ToUpdate) {
2471 auto *II = dyn_cast<InvokeInst>(Call);
2472 if (!II)
2473 continue;
2474 normalizeForInvokeSafepoint(II->getNormalDest(), II->getParent(), DT);
2475 normalizeForInvokeSafepoint(II->getUnwindDest(), II->getParent(), DT);
2476 }
2477
2478 // A list of dummy calls added to the IR to keep various values obviously
2479 // live in the IR. We'll remove all of these when done.
2480 SmallVector<CallInst *, 64> Holders;
2481
2482 // Insert a dummy call with all of the deopt operands we'll need for the
2483 // actual safepoint insertion as arguments. This ensures reference operands
2484 // in the deopt argument list are considered live through the safepoint (and
2485   // thus makes sure they get relocated).
2486 for (CallBase *Call : ToUpdate) {
2487 SmallVector<Value *, 64> DeoptValues;
2488
2489 for (Value *Arg : GetDeoptBundleOperands(Call)) {
2490 assert(!isUnhandledGCPointerType(Arg->getType()) &&
2491 "support for FCA unimplemented");
2492 if (isHandledGCPointerType(Arg->getType()))
2493 DeoptValues.push_back(Arg);
2494 }
2495
2496 insertUseHolderAfter(Call, DeoptValues, Holders);
2497 }
2498
2499 SmallVector<PartiallyConstructedSafepointRecord, 64> Records(ToUpdate.size());
2500
2501 // A) Identify all gc pointers which are statically live at the given call
2502 // site.
2503 findLiveReferences(F, DT, ToUpdate, Records);
2504
2505 // B) Find the base pointers for each live pointer
2506 for (size_t i = 0; i < Records.size(); i++) {
2507 PartiallyConstructedSafepointRecord &info = Records[i];
2508 findBasePointers(DT, DVCache, ToUpdate[i], info);
2509 }
2510
2511 // The base phi insertion logic (for any safepoint) may have inserted new
2512 // instructions which are now live at some safepoint. The simplest such
2513 // example is:
2514 // loop:
2515 // phi a <-- will be a new base_phi here
2516 // safepoint 1 <-- that needs to be live here
2517 // gep a + 1
2518 // safepoint 2
2519 // br loop
2520 // We insert some dummy calls after each safepoint to definitely hold live
2521 // the base pointers which were identified for that safepoint. We'll then
2522 // ask liveness for _every_ base inserted to see what is now live. Then we
2523 // remove the dummy calls.
2524 Holders.reserve(Holders.size() + Records.size());
2525 for (size_t i = 0; i < Records.size(); i++) {
2526 PartiallyConstructedSafepointRecord &Info = Records[i];
2527
2528 SmallVector<Value *, 128> Bases;
2529 for (auto Pair : Info.PointerToBase)
2530 Bases.push_back(Pair.second);
2531
2532 insertUseHolderAfter(ToUpdate[i], Bases, Holders);
2533 }
2534
2535 // By selecting base pointers, we've effectively inserted new uses. Thus, we
2536 // need to rerun liveness. We may *also* have inserted new defs, but that's
2537 // not the key issue.
2538 recomputeLiveInValues(F, DT, ToUpdate, Records);
2539
2540 if (PrintBasePointers) {
2541 for (auto &Info : Records) {
2542 errs() << "Base Pairs: (w/Relocation)\n";
2543 for (auto Pair : Info.PointerToBase) {
2544 errs() << " derived ";
2545 Pair.first->printAsOperand(errs(), false);
2546 errs() << " base ";
2547 Pair.second->printAsOperand(errs(), false);
2548 errs() << "\n";
2549 }
2550 }
2551 }
2552
2553 // It is possible that non-constant live variables have a constant base. For
2554 // example, a GEP with a variable offset from a global. In this case we can
2555 // remove it from the liveset. We already don't add constants to the liveset
2556 // because we assume they won't move at runtime and the GC doesn't need to be
2557 // informed about them. The same reasoning applies if the base is constant.
2558 // Note that the relocation placement code relies on this filtering for
2559 // correctness as it expects the base to be in the liveset, which isn't true
2560 // if the base is constant.
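  // For example (a sketch with a hypothetical global @g in the GC address
  // space):
  //   %derived = getelementptr i8, i8 addrspace(1)* @g, i64 %idx
  // is non-constant, but its base @g is constant, so %derived is dropped here.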
2561 for (auto &Info : Records)
2562 for (auto &BasePair : Info.PointerToBase)
2563 if (isa<Constant>(BasePair.second))
2564 Info.LiveSet.remove(BasePair.first);
2565
2566 for (CallInst *CI : Holders)
2567 CI->eraseFromParent();
2568
2569 Holders.clear();
2570
2571   // In order to reduce the live set of a statepoint we might choose to rematerialize
2572 // some values instead of relocating them. This is purely an optimization and
2573 // does not influence correctness.
2574 for (size_t i = 0; i < Records.size(); i++)
2575 rematerializeLiveValues(ToUpdate[i], Records[i], TTI);
2576
2577 // We need this to safely RAUW and delete call or invoke return values that
2578 // may themselves be live over a statepoint. For details, please see usage in
2579 // makeStatepointExplicitImpl.
2580 std::vector<DeferredReplacement> Replacements;
2581
2582 // Now run through and replace the existing statepoints with new ones with
2583 // the live variables listed. We do not yet update uses of the values being
2584 // relocated. We have references to live variables that need to
2585 // survive to the last iteration of this loop. (By construction, the
2586   // previous statepoint cannot be a live variable, thus we can safely remove
2587 // the old statepoint calls as we go.)
2588 for (size_t i = 0; i < Records.size(); i++)
2589 makeStatepointExplicit(DT, ToUpdate[i], Records[i], Replacements);
2590
2591   ToUpdate.clear(); // prevent accidental use of invalid calls.
2592
2593 for (auto &PR : Replacements)
2594 PR.doReplacement();
2595
2596 Replacements.clear();
2597
2598 for (auto &Info : Records) {
2599     // These live sets may contain stale Value pointers, since we replaced calls
2600 // with operand bundles with calls wrapped in gc.statepoint, and some of
2601 // those calls may have been def'ing live gc pointers. Clear these out to
2602 // avoid accidentally using them.
2603 //
2604 // TODO: We should create a separate data structure that does not contain
2605 // these live sets, and migrate to using that data structure from this point
2606 // onward.
2607 Info.LiveSet.clear();
2608 Info.PointerToBase.clear();
2609 }
2610
2611 // Do all the fixups of the original live variables to their relocated selves
2612 SmallVector<Value *, 128> Live;
2613 for (size_t i = 0; i < Records.size(); i++) {
2614 PartiallyConstructedSafepointRecord &Info = Records[i];
2615
2616 // We can't simply save the live set from the original insertion. One of
2617 // the live values might be the result of a call which needs a safepoint.
2618 // That Value* no longer exists and we need to use the new gc_result.
2619 // Thankfully, the live set is embedded in the statepoint (and updated), so
2620 // we just grab that.
2621 llvm::append_range(Live, Info.StatepointToken->gc_args());
2622 #ifndef NDEBUG
2623 // Do some basic sanity checks on our liveness results before performing
2624 // relocation. Relocation can and will turn mistakes in liveness results
2625     // into nonsensical code which is much harder to debug.
2626 // TODO: It would be nice to test consistency as well
2627 assert(DT.isReachableFromEntry(Info.StatepointToken->getParent()) &&
2628 "statepoint must be reachable or liveness is meaningless");
2629 for (Value *V : Info.StatepointToken->gc_args()) {
2630 if (!isa<Instruction>(V))
2631         // Non-instruction values trivially dominate all possible uses
2632 continue;
2633 auto *LiveInst = cast<Instruction>(V);
2634 assert(DT.isReachableFromEntry(LiveInst->getParent()) &&
2635 "unreachable values should never be live");
2636 assert(DT.dominates(LiveInst, Info.StatepointToken) &&
2637 "basic SSA liveness expectation violated by liveness analysis");
2638 }
2639 #endif
2640 }
2641 unique_unsorted(Live);
2642
2643 #ifndef NDEBUG
2644 // sanity check
2645 for (auto *Ptr : Live)
2646 assert(isHandledGCPointerType(Ptr->getType()) &&
2647 "must be a gc pointer type");
2648 #endif
2649
2650 relocationViaAlloca(F, DT, Live, Records);
2651 return !Records.empty();
2652 }
2653
2654 // Handles both return values and arguments for Functions and calls.
2655 template <typename AttrHolder>
2656 static void RemoveNonValidAttrAtIndex(LLVMContext &Ctx, AttrHolder &AH,
2657 unsigned Index) {
2658 AttrBuilder R;
2659 if (AH.getDereferenceableBytes(Index))
2660 R.addAttribute(Attribute::get(Ctx, Attribute::Dereferenceable,
2661 AH.getDereferenceableBytes(Index)));
2662 if (AH.getDereferenceableOrNullBytes(Index))
2663 R.addAttribute(Attribute::get(Ctx, Attribute::DereferenceableOrNull,
2664 AH.getDereferenceableOrNullBytes(Index)));
2665 for (auto Attr : ParamAttrsToStrip)
2666 if (AH.getAttributes().hasAttribute(Index, Attr))
2667 R.addAttribute(Attr);
2668
2669 if (!R.empty())
2670 AH.setAttributes(AH.getAttributes().removeAttributes(Ctx, Index, R));
2671 }
2672
2673 static void stripNonValidAttributesFromPrototype(Function &F) {
2674 LLVMContext &Ctx = F.getContext();
2675
2676   // Intrinsics are very delicate. Lowering sometimes depends on the presence
2677   // of certain attributes for correctness, but we may have also inferred
2678   // additional ones in the abstract machine model which need to be stripped. This
2679 // assumes that the attributes defined in Intrinsic.td are conservatively
2680 // correct for both physical and abstract model.
2681 if (Intrinsic::ID id = F.getIntrinsicID()) {
2682 F.setAttributes(Intrinsic::getAttributes(Ctx, id));
2683 return;
2684 }
2685
2686 for (Argument &A : F.args())
2687 if (isa<PointerType>(A.getType()))
2688 RemoveNonValidAttrAtIndex(Ctx, F,
2689 A.getArgNo() + AttributeList::FirstArgIndex);
2690
2691 if (isa<PointerType>(F.getReturnType()))
2692 RemoveNonValidAttrAtIndex(Ctx, F, AttributeList::ReturnIndex);
2693
2694 for (auto Attr : FnAttrsToStrip)
2695 F.removeFnAttr(Attr);
2696 }

/// Certain metadata on instructions are invalid after running RS4GC.
/// Optimizations that run after RS4GC can incorrectly use this metadata to
/// optimize functions. We drop such metadata on the instruction.
static void stripInvalidMetadataFromInstruction(Instruction &I) {
  if (!isa<LoadInst>(I) && !isa<StoreInst>(I))
    return;
  // These are the metadata kinds that are still valid on loads and stores
  // after RS4GC.
  // The metadata implying dereferenceability and noalias are (conservatively)
  // dropped.  This is because semantically, after RewriteStatepointsForGC
  // runs, all calls to gc.statepoint "free" the entire heap.  Also,
  // gc.statepoint can touch the entire heap including noalias objects.
  // Note: The reasoning is the same as for stripping the dereferenceability
  // and noalias attributes that are analogous to the metadata counterparts.
  // We also drop the invariant.load metadata on the load because that metadata
  // implies the address operand to the load points to memory that is never
  // changed once it became dereferenceable.  This is no longer true after
  // RS4GC.  Similar reasoning applies to invariant.group metadata, which
  // applies to loads within a group.
  unsigned ValidMetadataAfterRS4GC[] = {LLVMContext::MD_tbaa,
                                        LLVMContext::MD_range,
                                        LLVMContext::MD_alias_scope,
                                        LLVMContext::MD_nontemporal,
                                        LLVMContext::MD_nonnull,
                                        LLVMContext::MD_align,
                                        LLVMContext::MD_type};

  // Drops all metadata on the instruction other than ValidMetadataAfterRS4GC.
  I.dropUnknownNonDebugMetadata(ValidMetadataAfterRS4GC);
}

static void stripNonValidDataFromBody(Function &F) {
  if (F.empty())
    return;

  LLVMContext &Ctx = F.getContext();
  MDBuilder Builder(Ctx);

  // Set of invariant.start instructions that we need to remove.
  // Use this to avoid invalidating the instruction iterator.
  SmallVector<IntrinsicInst *, 12> InvariantStartInstructions;

  for (Instruction &I : instructions(F)) {
    // invariant.start on a memory location implies that the referenced memory
    // location is constant and unchanging.  This is no longer true after
    // RewriteStatepointsForGC runs because there can be calls to gc.statepoint
    // which frees the entire heap, and the presence of invariant.start allows
    // the optimizer to sink the load of a memory location past a statepoint,
    // which is incorrect.
    if (auto *II = dyn_cast<IntrinsicInst>(&I))
      if (II->getIntrinsicID() == Intrinsic::invariant_start) {
        InvariantStartInstructions.push_back(II);
        continue;
      }

    if (MDNode *Tag = I.getMetadata(LLVMContext::MD_tbaa)) {
      MDNode *MutableTBAA = Builder.createMutableTBAAAccessTag(Tag);
      I.setMetadata(LLVMContext::MD_tbaa, MutableTBAA);
    }

    stripInvalidMetadataFromInstruction(I);

    if (auto *Call = dyn_cast<CallBase>(&I)) {
      for (int i = 0, e = Call->arg_size(); i != e; i++)
        if (isa<PointerType>(Call->getArgOperand(i)->getType()))
          RemoveNonValidAttrAtIndex(Ctx, *Call,
                                    i + AttributeList::FirstArgIndex);
      if (isa<PointerType>(Call->getType()))
        RemoveNonValidAttrAtIndex(Ctx, *Call, AttributeList::ReturnIndex);
    }
  }

  // Delete the invariant.start instructions and RAUW undef.
  for (auto *II : InvariantStartInstructions) {
    II->replaceAllUsesWith(UndefValue::get(II->getType()));
    II->eraseFromParent();
  }
}

/// Returns true if this function should be rewritten by this pass.  The main
/// point of this function is as an extension point for custom logic.
static bool shouldRewriteStatepointsIn(Function &F) {
  // TODO: This should check the GCStrategy
  if (F.hasGC()) {
    const auto &FunctionGCName = F.getGC();
    const StringRef StatepointExampleName("statepoint-example");
    const StringRef CoreCLRName("coreclr");
    return (StatepointExampleName == FunctionGCName) ||
           (CoreCLRName == FunctionGCName);
  } else
    return false;
}

static void stripNonValidData(Module &M) {
#ifndef NDEBUG
  assert(llvm::any_of(M, shouldRewriteStatepointsIn) && "precondition!");
#endif

  for (Function &F : M)
    stripNonValidAttributesFromPrototype(F);

  for (Function &F : M)
    stripNonValidDataFromBody(F);
}

bool RewriteStatepointsForGC::runOnFunction(Function &F, DominatorTree &DT,
                                            TargetTransformInfo &TTI,
                                            const TargetLibraryInfo &TLI) {
  assert(!F.isDeclaration() && !F.empty() &&
         "need function body to rewrite statepoints in");
  assert(shouldRewriteStatepointsIn(F) && "mismatch in rewrite decision");

  auto NeedsRewrite = [&TLI](Instruction &I) {
    if (const auto *Call = dyn_cast<CallBase>(&I)) {
      if (isa<GCStatepointInst>(Call))
        return false;
      if (callsGCLeafFunction(Call, TLI))
        return false;

      // Normally it's up to the frontend to make sure that non-leaf calls
      // also have proper deopt state if it is required.  We make an exception
      // for element atomic memcpy/memmove intrinsics here.  Unlike other
      // intrinsics these are non-leaf by default.  They might be generated by
      // the optimizer, which doesn't know how to produce a proper deopt
      // state.  So if we see a non-leaf memcpy/memmove without deopt state,
      // just treat it as a leaf copy and don't produce a statepoint.
      if (!AllowStatepointWithNoDeoptInfo &&
          !Call->getOperandBundle(LLVMContext::OB_deopt)) {
        assert((isa<AtomicMemCpyInst>(Call) || isa<AtomicMemMoveInst>(Call)) &&
               "Don't expect any other calls here!");
        return false;
      }
      return true;
    }
    return false;
  };
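
  // As a rough sketch (illustrative IR with abbreviated intrinsic
  // signatures), a call selected by NeedsRewrite such as
  //   %obj2 = call i8 addrspace(1)* @foo(i8 addrspace(1)* %obj)
  // is later rewritten by insertParsePoints into
  //   %tok = call token @llvm.experimental.gc.statepoint(..., @foo, ...)
  //   %obj2 = call i8 addrspace(1)* @llvm.experimental.gc.result(token %tok)
  //   %obj.relocated =
  //       call i8 addrspace(1)* @llvm.experimental.gc.relocate(token %tok, ...)
  // with subsequent uses of %obj rewritten to use %obj.relocated.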

  // Delete any unreachable statepoints so that we don't have unrewritten
  // statepoints surviving this pass.  This makes testing easier and the
  // resulting IR less confusing to human readers.
  DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
  bool MadeChange = removeUnreachableBlocks(F, &DTU);
  // Flush the Dominator Tree.
  DTU.getDomTree();

  // Gather all the statepoints which need to be rewritten.  Be careful to
  // only consider those in reachable code since we need to ask dominance
  // queries when rewriting.  We'll delete the unreachable ones in a moment.
  SmallVector<CallBase *, 64> ParsePointNeeded;
  SmallVector<CallInst *, 64> Intrinsics;
  for (Instruction &I : instructions(F)) {
    // TODO: only the ones with the flag set!
    if (NeedsRewrite(I)) {
      // NOTE removeUnreachableBlocks() is stronger than
      // DominatorTree::isReachableFromEntry().  In other words
      // removeUnreachableBlocks can remove some blocks for which
      // isReachableFromEntry() returns true.
      assert(DT.isReachableFromEntry(I.getParent()) &&
             "no unreachable blocks expected");
      ParsePointNeeded.push_back(cast<CallBase>(&I));
    }
    if (auto *CI = dyn_cast<CallInst>(&I))
      if (CI->getIntrinsicID() == Intrinsic::experimental_gc_get_pointer_base ||
          CI->getIntrinsicID() == Intrinsic::experimental_gc_get_pointer_offset)
        Intrinsics.emplace_back(CI);
  }

  // Return early if no work to do.
  if (ParsePointNeeded.empty() && Intrinsics.empty())
    return MadeChange;

  // As a prepass, go ahead and aggressively destroy single entry phi nodes.
  // These are created by LCSSA.  They have the effect of increasing the size
  // of liveness sets for no good reason.  It may be harder to do this post
  // insertion since relocations and base phis can confuse things.
  for (BasicBlock &BB : F)
    if (BB.getUniquePredecessor())
      MadeChange |= FoldSingleEntryPHINodes(&BB);
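  // (For example, an LCSSA phi such as
  //   %v.lcssa = phi i8 addrspace(1)* [ %v, %pred ]
  // in a single-predecessor block is folded away in favor of %v itself.)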

  // Before we start introducing relocations, we want to tweak the IR a bit to
  // avoid unfortunate code generation effects.  The main example is that we
  // want to try to make sure the comparison feeding a branch is after any
  // safepoints.  Otherwise, we end up with a comparison of pre-relocation
  // values feeding a branch after relocation.  This is semantically correct,
  // but results in extra register pressure since both the pre-relocation and
  // post-relocation copies must be available in registers.  For code without
  // relocations this is handled elsewhere, but teaching the scheduler to
  // reverse the transform we're about to do would be slightly complex.
  // Note: This may extend the live range of the inputs to the icmp and thus
  // increase the liveset of any statepoint we move over.  This is profitable
  // as long as all statepoints are in rare blocks.  If we had in-register
  // lowering for live values this would be a much safer transform.
  auto getConditionInst = [](Instruction *TI) -> Instruction * {
    if (auto *BI = dyn_cast<BranchInst>(TI))
      if (BI->isConditional())
        return dyn_cast<Instruction>(BI->getCondition());
    // TODO: Extend this to handle switches
    return nullptr;
  };
  for (BasicBlock &BB : F) {
    Instruction *TI = BB.getTerminator();
    if (auto *Cond = getConditionInst(TI))
      // TODO: Handle more than just ICmps here.  We should be able to move
      // most instructions without side effects or memory access.
      if (isa<ICmpInst>(Cond) && Cond->hasOneUse()) {
        MadeChange = true;
        Cond->moveBefore(TI);
      }
  }
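  // Concretely (illustrative IR), given
  //   %cmp = icmp eq i8 addrspace(1)* %a, %b
  //   <call which will become a statepoint relocating %a and %b>
  //   br i1 %cmp, label %taken, label %untaken
  // the icmp is moved to just before the branch so that, after rewriting, it
  // can consume the relocated values directly rather than keeping both the
  // pre- and post-relocation copies live.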

  // Nasty workaround - The base computation code in the main algorithm
  // doesn't consider the fact that a GEP can be used to convert a scalar to a
  // vector.  The right fix for this is to integrate GEPs into the base
  // rewriting algorithm properly; this is just a short term workaround to
  // prevent crashes by canonicalizing such GEPs into fully vector GEPs.
  for (Instruction &I : instructions(F)) {
    if (!isa<GetElementPtrInst>(I))
      continue;

    unsigned VF = 0;
    for (unsigned i = 0; i < I.getNumOperands(); i++)
      if (auto *OpndVTy = dyn_cast<VectorType>(I.getOperand(i)->getType())) {
        assert(VF == 0 ||
               VF == cast<FixedVectorType>(OpndVTy)->getNumElements());
        VF = cast<FixedVectorType>(OpndVTy)->getNumElements();
      }

    // It's the vector to scalar traversal through the pointer operand which
    // confuses base pointer rewriting, so limit ourselves to that case.
    if (!I.getOperand(0)->getType()->isVectorTy() && VF != 0) {
      IRBuilder<> B(&I);
      auto *Splat = B.CreateVectorSplat(VF, I.getOperand(0));
      I.setOperand(0, Splat);
      MadeChange = true;
    }
  }
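  // E.g. (illustrative IR), the mixed GEP
  //   getelementptr i32, i32* %base, <2 x i64> %idxs
  // becomes a splat of %base to <2 x i32*> followed by a GEP whose operands
  // are uniformly vector.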

  // Cache the 'defining value' relation used in the computation and
  // insertion of base phis and selects.  This ensures that we don't insert
  // large numbers of duplicate base_phis.  Use one cache for both
  // inlineGetBaseAndOffset() and insertParsePoints().
  DefiningValueMapTy DVCache;

  if (!Intrinsics.empty())
    // Inline @gc.get.pointer.base() and @gc.get.pointer.offset() before
    // finding live references.
    MadeChange |= inlineGetBaseAndOffset(F, Intrinsics, DVCache);

  if (!ParsePointNeeded.empty())
    MadeChange |= insertParsePoints(F, DT, TTI, ParsePointNeeded, DVCache);

  return MadeChange;
}

// liveness computation via standard dataflow
// -------------------------------------------------------------------

// TODO: Consider using bitvectors for liveness, the set of potentially
// interesting values should be small and easy to pre-compute.
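//
// For reference, the fixed point computed below is the standard backwards
// liveness system:
//   LiveOut(BB) = union of LiveIn(S) over all successors S of BB
//   LiveIn(BB)  = (Gen(BB) U LiveOut(BB)) \ Kill(BB)
// where Gen(BB) (LiveSet) holds the upward-exposed gc pointer uses in BB and
// Kill(BB) (KillSet) holds the gc pointer values defined in BB.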

/// Compute the live-in set for the reverse range [Begin, End) starting from
/// the live-out set of the basic block.
static void computeLiveInValues(BasicBlock::reverse_iterator Begin,
                                BasicBlock::reverse_iterator End,
                                SetVector<Value *> &LiveTmp) {
  for (auto &I : make_range(Begin, End)) {
    // KILL/Def - Remove this definition from LiveIn
    LiveTmp.remove(&I);

    // Don't consider *uses* in PHI nodes; we handle their contribution to
    // predecessor blocks when we seed the LiveOut sets.
    if (isa<PHINode>(I))
      continue;

    // USE - Add to the LiveIn set for this instruction
    for (Value *V : I.operands()) {
      assert(!isUnhandledGCPointerType(V->getType()) &&
             "support for FCA unimplemented");
      if (isHandledGCPointerType(V->getType()) && !isa<Constant>(V)) {
        // The choice to exclude all things constant here is slightly subtle.
        // There are two independent reasons:
        // - We assume that things which are constant (from LLVM's definition)
        //   do not move at runtime.  For example, the address of a global
        //   variable is fixed, even though its contents may not be.
        // - Second, we can't disallow arbitrary inttoptr constants even
        //   if the language frontend does.  Optimization passes are free to
        //   locally exploit facts without respect to global reachability.
        //   This can create sections of code which are dynamically
        //   unreachable and contain just about anything.  (see constants.ll
        //   in tests)
        LiveTmp.insert(V);
      }
    }
  }
}

static void computeLiveOutSeed(BasicBlock *BB, SetVector<Value *> &LiveTmp) {
  for (BasicBlock *Succ : successors(BB)) {
    for (auto &I : *Succ) {
      PHINode *PN = dyn_cast<PHINode>(&I);
      if (!PN)
        break;

      Value *V = PN->getIncomingValueForBlock(BB);
      assert(!isUnhandledGCPointerType(V->getType()) &&
             "support for FCA unimplemented");
      if (isHandledGCPointerType(V->getType()) && !isa<Constant>(V))
        LiveTmp.insert(V);
    }
  }
}

static SetVector<Value *> computeKillSet(BasicBlock *BB) {
  SetVector<Value *> KillSet;
  for (Instruction &I : *BB)
    if (isHandledGCPointerType(I.getType()))
      KillSet.insert(&I);
  return KillSet;
}

#ifndef NDEBUG
/// Check that the items in 'Live' dominate 'TI'.  This is used as a basic
/// sanity check for the liveness computation.
static void checkBasicSSA(DominatorTree &DT, SetVector<Value *> &Live,
                          Instruction *TI, bool TermOkay = false) {
  for (Value *V : Live) {
    if (auto *I = dyn_cast<Instruction>(V)) {
      // The terminator can be a member of the LiveOut set.  LLVM's definition
      // of instruction dominance states that V does not dominate itself.  As
      // such, we need to special case this to allow it.
      if (TermOkay && TI == I)
        continue;
      assert(DT.dominates(I, TI) &&
             "basic SSA liveness expectation violated by liveness analysis");
    }
  }
}

/// Check that all the liveness sets used during the computation of liveness
/// obey basic SSA properties.  This is useful for finding cases where we miss
/// a def.
static void checkBasicSSA(DominatorTree &DT, GCPtrLivenessData &Data,
                          BasicBlock &BB) {
  checkBasicSSA(DT, Data.LiveSet[&BB], BB.getTerminator());
  checkBasicSSA(DT, Data.LiveOut[&BB], BB.getTerminator(), true);
  checkBasicSSA(DT, Data.LiveIn[&BB], BB.getTerminator());
}
#endif

static void computeLiveInValues(DominatorTree &DT, Function &F,
                                GCPtrLivenessData &Data) {
  SmallSetVector<BasicBlock *, 32> Worklist;

  // Seed the liveness for each individual block
  for (BasicBlock &BB : F) {
    Data.KillSet[&BB] = computeKillSet(&BB);
    Data.LiveSet[&BB].clear();
    computeLiveInValues(BB.rbegin(), BB.rend(), Data.LiveSet[&BB]);

#ifndef NDEBUG
    for (Value *Kill : Data.KillSet[&BB])
      assert(!Data.LiveSet[&BB].count(Kill) && "live set contains kill");
#endif

    Data.LiveOut[&BB] = SetVector<Value *>();
    computeLiveOutSeed(&BB, Data.LiveOut[&BB]);
    Data.LiveIn[&BB] = Data.LiveSet[&BB];
    Data.LiveIn[&BB].set_union(Data.LiveOut[&BB]);
    Data.LiveIn[&BB].set_subtract(Data.KillSet[&BB]);
    if (!Data.LiveIn[&BB].empty())
      Worklist.insert(pred_begin(&BB), pred_end(&BB));
  }

  // Propagate that liveness until stable
  while (!Worklist.empty()) {
    BasicBlock *BB = Worklist.pop_back_val();

    // Compute our new liveout set, then exit early if it hasn't changed
    // despite the contribution of our successors.
    SetVector<Value *> LiveOut = Data.LiveOut[BB];
    const auto OldLiveOutSize = LiveOut.size();
    for (BasicBlock *Succ : successors(BB)) {
      assert(Data.LiveIn.count(Succ));
      LiveOut.set_union(Data.LiveIn[Succ]);
    }
    // assert: OldLiveOut is a subset of LiveOut
    if (OldLiveOutSize == LiveOut.size()) {
      // If the sets are the same size, then we didn't actually add anything
      // when unioning our successors' LiveIn.  Thus, the LiveIn of this block
      // hasn't changed.
      continue;
    }
    Data.LiveOut[BB] = LiveOut;

    // Apply the effects of this basic block
    SetVector<Value *> LiveTmp = LiveOut;
    LiveTmp.set_union(Data.LiveSet[BB]);
    LiveTmp.set_subtract(Data.KillSet[BB]);

    assert(Data.LiveIn.count(BB));
    const SetVector<Value *> &OldLiveIn = Data.LiveIn[BB];
    // assert: OldLiveIn is a subset of LiveTmp
    if (OldLiveIn.size() != LiveTmp.size()) {
      Data.LiveIn[BB] = LiveTmp;
      Worklist.insert(pred_begin(BB), pred_end(BB));
    }
  } // while (!Worklist.empty())

#ifndef NDEBUG
  // Sanity check our output against SSA properties.  This helps catch any
  // missing kills during the above iteration.
  for (BasicBlock &BB : F)
    checkBasicSSA(DT, Data, BB);
#endif
}

static void findLiveSetAtInst(Instruction *Inst, GCPtrLivenessData &Data,
                              StatepointLiveSetTy &Out) {
  BasicBlock *BB = Inst->getParent();

  // Note: The copy is intentional and required
  assert(Data.LiveOut.count(BB));
  SetVector<Value *> LiveOut = Data.LiveOut[BB];

  // We want to handle the statepoint itself oddly.  Its call result is not
  // live (normal), nor are its arguments (unless they're used again later).
  // This adjustment is specifically what we need to relocate.
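  // The reverse range below runs from the block's last instruction backwards
  // through (and including) Inst itself; incrementing Inst's reverse iterator
  // makes it the exclusive end of the range.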
  computeLiveInValues(BB->rbegin(), ++Inst->getIterator().getReverse(),
                      LiveOut);
  LiveOut.remove(Inst);
  Out.insert(LiveOut.begin(), LiveOut.end());
}

static void recomputeLiveInValues(GCPtrLivenessData &RevisedLivenessData,
                                  CallBase *Call,
                                  PartiallyConstructedSafepointRecord &Info) {
  StatepointLiveSetTy Updated;
  findLiveSetAtInst(Call, RevisedLivenessData, Updated);

  // We may have base pointers which are now live that weren't before.  We
  // need to update the PointerToBase structure to reflect this.
  for (auto V : Updated)
    Info.PointerToBase.insert({V, V});

#ifndef NDEBUG
  for (auto V : Updated)
    assert(Info.PointerToBase.count(V) &&
           "Must be able to find base for live value!");
#endif

  // Remove any stale base mappings - this can happen since our liveness is
  // more precise than the one inherent in the base pointer analysis.
  DenseSet<Value *> ToErase;
  for (auto KVPair : Info.PointerToBase)
    if (!Updated.count(KVPair.first))
      ToErase.insert(KVPair.first);

  for (auto *V : ToErase)
    Info.PointerToBase.erase(V);

#ifndef NDEBUG
  for (auto KVPair : Info.PointerToBase)
    assert(Updated.count(KVPair.first) && "record for non-live value");
#endif

  Info.LiveSet = Updated;
}