1 //===- CoroFrame.cpp - Builds and manipulates coroutine frame -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
// This file contains classes used to discover, for a particular value,
// whether the path from its definition to a use crosses a suspend point.
10 //
11 // Using the information discovered we form a Coroutine Frame structure to
12 // contain those values. All uses of those values are replaced with appropriate
13 // GEP + load from the coroutine frame. At the point of the definition we spill
14 // the value into the coroutine frame.
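//
// As a minimal illustrative sketch (hand-written here, not actual output of
// this pass), a value %x that is live across a suspend point:
//
//   %x = call i32 @f()             ; definition
//   ... suspend point ...
//   call void @use(i32 %x)         ; use after resumption
//
// is conceptually rewritten so that %x is spilled into the frame at its
// definition and reloaded at the use:
//
//   %x = call i32 @f()
//   %x.spill.addr = getelementptr %f.Frame, ptr %frame, i32 0, i32 N
//   store i32 %x, ptr %x.spill.addr  ; N is a made-up field index
//   ... suspend point ...
//   %x.reload = load i32, ptr %x.spill.addr
//   call void @use(i32 %x.reload)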
15 //===----------------------------------------------------------------------===//
16
17 #include "CoroInternal.h"
18 #include "llvm/ADT/BitVector.h"
19 #include "llvm/ADT/ScopeExit.h"
20 #include "llvm/ADT/SmallString.h"
21 #include "llvm/Analysis/PtrUseVisitor.h"
22 #include "llvm/Analysis/StackLifetime.h"
23 #include "llvm/Config/llvm-config.h"
24 #include "llvm/IR/CFG.h"
25 #include "llvm/IR/DIBuilder.h"
26 #include "llvm/IR/DebugInfo.h"
27 #include "llvm/IR/Dominators.h"
28 #include "llvm/IR/IRBuilder.h"
29 #include "llvm/IR/InstIterator.h"
30 #include "llvm/IR/IntrinsicInst.h"
31 #include "llvm/Support/Debug.h"
32 #include "llvm/Support/MathExtras.h"
33 #include "llvm/Support/OptimizedStructLayout.h"
34 #include "llvm/Support/circular_raw_ostream.h"
35 #include "llvm/Support/raw_ostream.h"
36 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
37 #include "llvm/Transforms/Utils/Local.h"
38 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
39 #include <algorithm>
40
41 using namespace llvm;
42
43 // The "coro-suspend-crossing" flag is very noisy. There is another debug type,
44 // "coro-frame", which results in leaner debug spew.
45 #define DEBUG_TYPE "coro-suspend-crossing"
46
47 enum { SmallVectorThreshold = 32 };
48
49 // Provides two way mapping between the blocks and numbers.
50 namespace {
51 class BlockToIndexMapping {
52 SmallVector<BasicBlock *, SmallVectorThreshold> V;
53
54 public:
  size_t size() const { return V.size(); }
56
  BlockToIndexMapping(Function &F) {
58 for (BasicBlock &BB : F)
59 V.push_back(&BB);
60 llvm::sort(V);
61 }
62
  size_t blockToIndex(BasicBlock *BB) const {
64 auto *I = llvm::lower_bound(V, BB);
    assert(I != V.end() && *I == BB && "BasicBlockNumbering: Unknown block");
66 return I - V.begin();
67 }
68
  BasicBlock *indexToBlock(unsigned Index) const { return V[Index]; }
70 };
71 } // end anonymous namespace
72
// The SuspendCrossingInfo maintains data that allows us to answer the question
// of whether, given two BasicBlocks A and B, there is a path from A to B that
// passes through a suspend point.
76 //
77 // For every basic block 'i' it maintains a BlockData that consists of:
78 // Consumes: a bit vector which contains a set of indices of blocks that can
79 // reach block 'i'
80 // Kills: a bit vector which contains a set of indices of blocks that can
//    reach block 'i', but at least one of those paths crosses a suspend point
82 // Suspend: a boolean indicating whether block 'i' contains a suspend point.
83 // End: a boolean indicating whether block 'i' contains a coro.end intrinsic.
84 //
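// As an illustrative example (assumed here, not taken from a real function),
// consider a simple chain of blocks A -> B -> C where B contains a suspend
// point. After the propagation below converges:
//
//   A:  Consumes = {A}        Kills = {}
//   B:  Consumes = {A, B}     Kills = {A, B}   (B is a Suspend block)
//   C:  Consumes = {A, B, C}  Kills = {A, B}
//
// So hasPathCrossingSuspendPoint(A, C) is true: a value defined in A and used
// in C must be spilled to the frame, while a value defined and used entirely
// within A need not be.
//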
85 namespace {
86 struct SuspendCrossingInfo {
87 BlockToIndexMapping Mapping;
88
89 struct BlockData {
90 BitVector Consumes;
91 BitVector Kills;
92 bool Suspend = false;
93 bool End = false;
94 };
95 SmallVector<BlockData, SmallVectorThreshold> Block;
96
  iterator_range<succ_iterator> successors(BlockData const &BD) const {
98 BasicBlock *BB = Mapping.indexToBlock(&BD - &Block[0]);
99 return llvm::successors(BB);
100 }
101
  BlockData &getBlockData(BasicBlock *BB) {
103 return Block[Mapping.blockToIndex(BB)];
104 }
105
106 void dump() const;
107 void dump(StringRef Label, BitVector const &BV) const;
108
109 SuspendCrossingInfo(Function &F, coro::Shape &Shape);
110
  bool hasPathCrossingSuspendPoint(BasicBlock *DefBB, BasicBlock *UseBB) const {
112 size_t const DefIndex = Mapping.blockToIndex(DefBB);
113 size_t const UseIndex = Mapping.blockToIndex(UseBB);
114
115 bool const Result = Block[UseIndex].Kills[DefIndex];
116 LLVM_DEBUG(dbgs() << UseBB->getName() << " => " << DefBB->getName()
117 << " answer is " << Result << "\n");
118 return Result;
119 }
120
  bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const {
122 auto *I = cast<Instruction>(U);
123
124 // We rewrote PHINodes, so that only the ones with exactly one incoming
125 // value need to be analyzed.
126 if (auto *PN = dyn_cast<PHINode>(I))
127 if (PN->getNumIncomingValues() > 1)
128 return false;
129
130 BasicBlock *UseBB = I->getParent();
131
132 // As a special case, treat uses by an llvm.coro.suspend.retcon or an
133 // llvm.coro.suspend.async as if they were uses in the suspend's single
134 // predecessor: the uses conceptually occur before the suspend.
135 if (isa<CoroSuspendRetconInst>(I) || isa<CoroSuspendAsyncInst>(I)) {
136 UseBB = UseBB->getSinglePredecessor();
137 assert(UseBB && "should have split coro.suspend into its own block");
138 }
139
140 return hasPathCrossingSuspendPoint(DefBB, UseBB);
141 }
142
  bool isDefinitionAcrossSuspend(Argument &A, User *U) const {
144 return isDefinitionAcrossSuspend(&A.getParent()->getEntryBlock(), U);
145 }
146
  bool isDefinitionAcrossSuspend(Instruction &I, User *U) const {
148 auto *DefBB = I.getParent();
149
150 // As a special case, treat values produced by an llvm.coro.suspend.*
151 // as if they were defined in the single successor: the uses
152 // conceptually occur after the suspend.
153 if (isa<AnyCoroSuspendInst>(I)) {
154 DefBB = DefBB->getSingleSuccessor();
155 assert(DefBB && "should have split coro.suspend into its own block");
156 }
157
158 return isDefinitionAcrossSuspend(DefBB, U);
159 }
160
  bool isDefinitionAcrossSuspend(Value &V, User *U) const {
162 if (auto *Arg = dyn_cast<Argument>(&V))
163 return isDefinitionAcrossSuspend(*Arg, U);
164 if (auto *Inst = dyn_cast<Instruction>(&V))
165 return isDefinitionAcrossSuspend(*Inst, U);
166
167 llvm_unreachable(
168 "Coroutine could only collect Argument and Instruction now.");
169 }
170 };
171 } // end anonymous namespace
172
173 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SuspendCrossingInfo::dump(StringRef Label,
175 BitVector const &BV) const {
176 dbgs() << Label << ":";
177 for (size_t I = 0, N = BV.size(); I < N; ++I)
178 if (BV[I])
179 dbgs() << " " << Mapping.indexToBlock(I)->getName();
180 dbgs() << "\n";
181 }
182
LLVM_DUMP_METHOD void SuspendCrossingInfo::dump() const {
184 for (size_t I = 0, N = Block.size(); I < N; ++I) {
185 BasicBlock *const B = Mapping.indexToBlock(I);
186 dbgs() << B->getName() << ":\n";
187 dump(" Consumes", Block[I].Consumes);
188 dump(" Kills", Block[I].Kills);
189 }
190 dbgs() << "\n";
191 }
192 #endif
193
SuspendCrossingInfo::SuspendCrossingInfo(Function &F, coro::Shape &Shape)
195 : Mapping(F) {
196 const size_t N = Mapping.size();
197 Block.resize(N);
198
199 // Initialize every block so that it consumes itself
200 for (size_t I = 0; I < N; ++I) {
201 auto &B = Block[I];
202 B.Consumes.resize(N);
203 B.Kills.resize(N);
204 B.Consumes.set(I);
205 }
206
207 // Mark all CoroEnd Blocks. We do not propagate Kills beyond coro.ends as
208 // the code beyond coro.end is reachable during initial invocation of the
209 // coroutine.
210 for (auto *CE : Shape.CoroEnds)
211 getBlockData(CE->getParent()).End = true;
212
213 // Mark all suspend blocks and indicate that they kill everything they
  // consume. Note that crossing coro.save also requires a spill, as any code
215 // between coro.save and coro.suspend may resume the coroutine and all of the
216 // state needs to be saved by that time.
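  //
  // As an illustrative (assumed) ordering inside a suspend block:
  //
  //   %save = call token @llvm.coro.save(ptr %hdl)
  //   call void @enqueue(ptr %hdl)   ; may resume the coroutine on another
  //                                  ; thread before we reach the suspend
  //   %s = call i8 @llvm.coro.suspend(token %save, i1 false)
  //
  // Any value still live here must already be in the frame by the time
  // @enqueue runs, which is why crossing coro.save is treated like crossing
  // the suspend itself.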
217 auto markSuspendBlock = [&](IntrinsicInst *BarrierInst) {
218 BasicBlock *SuspendBlock = BarrierInst->getParent();
219 auto &B = getBlockData(SuspendBlock);
220 B.Suspend = true;
221 B.Kills |= B.Consumes;
222 };
223 for (auto *CSI : Shape.CoroSuspends) {
224 markSuspendBlock(CSI);
225 if (auto *Save = CSI->getCoroSave())
226 markSuspendBlock(Save);
227 }
228
229 // Iterate propagating consumes and kills until they stop changing.
230 int Iteration = 0;
231 (void)Iteration;
232
233 bool Changed;
234 do {
235 LLVM_DEBUG(dbgs() << "iteration " << ++Iteration);
236 LLVM_DEBUG(dbgs() << "==============\n");
237
238 Changed = false;
239 for (size_t I = 0; I < N; ++I) {
240 auto &B = Block[I];
241 for (BasicBlock *SI : successors(B)) {
242
243 auto SuccNo = Mapping.blockToIndex(SI);
244
        // Save the Consumes and Kills bitsets so that it is easy to see
        // if anything changed after propagation.
247 auto &S = Block[SuccNo];
248 auto SavedConsumes = S.Consumes;
249 auto SavedKills = S.Kills;
250
251 // Propagate Kills and Consumes from block B into its successor S.
252 S.Consumes |= B.Consumes;
253 S.Kills |= B.Kills;
254
        // If block B is a suspend block, it should propagate kills into its
        // successor for every block B consumes.
257 if (B.Suspend) {
258 S.Kills |= B.Consumes;
259 }
260 if (S.Suspend) {
261 // If block S is a suspend block, it should kill all of the blocks it
262 // consumes.
263 S.Kills |= S.Consumes;
264 } else if (S.End) {
265 // If block S is an end block, it should not propagate kills as the
266 // blocks following coro.end() are reached during initial invocation
267 // of the coroutine while all the data are still available on the
268 // stack or in the registers.
269 S.Kills.reset();
270 } else {
          // This is reached when block S is neither a suspend block nor a
          // coro.end block; we need to make sure it is not in the kill set.
273 S.Kills.reset(SuccNo);
274 }
275
276 // See if anything changed.
277 Changed |= (S.Kills != SavedKills) || (S.Consumes != SavedConsumes);
278
279 if (S.Kills != SavedKills) {
280 LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
281 << "\n");
282 LLVM_DEBUG(dump("S.Kills", S.Kills));
283 LLVM_DEBUG(dump("SavedKills", SavedKills));
284 }
285 if (S.Consumes != SavedConsumes) {
286 LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI << "\n");
287 LLVM_DEBUG(dump("S.Consume", S.Consumes));
288 LLVM_DEBUG(dump("SavedCons", SavedConsumes));
289 }
290 }
291 }
292 } while (Changed);
293 LLVM_DEBUG(dump());
294 }
295
296 #undef DEBUG_TYPE // "coro-suspend-crossing"
297 #define DEBUG_TYPE "coro-frame"
298
299 namespace {
300 class FrameTypeBuilder;
301 // Mapping from the to-be-spilled value to all the users that need reload.
302 using SpillInfo = SmallMapVector<Value *, SmallVector<Instruction *, 2>, 8>;
303 struct AllocaInfo {
304 AllocaInst *Alloca;
305 DenseMap<Instruction *, llvm::Optional<APInt>> Aliases;
306 bool MayWriteBeforeCoroBegin;
  AllocaInfo(AllocaInst *Alloca,
308 DenseMap<Instruction *, llvm::Optional<APInt>> Aliases,
309 bool MayWriteBeforeCoroBegin)
310 : Alloca(Alloca), Aliases(std::move(Aliases)),
311 MayWriteBeforeCoroBegin(MayWriteBeforeCoroBegin) {}
312 };
313 struct FrameDataInfo {
  // All the values (that are not allocas) that need to be spilled to the
315 // frame.
316 SpillInfo Spills;
317 // Allocas contains all values defined as allocas that need to live in the
318 // frame.
319 SmallVector<AllocaInfo, 8> Allocas;
320
  SmallVector<Value *, 8> getAllDefs() const {
322 SmallVector<Value *, 8> Defs;
323 for (const auto &P : Spills)
324 Defs.push_back(P.first);
325 for (const auto &A : Allocas)
326 Defs.push_back(A.Alloca);
327 return Defs;
328 }
329
  uint32_t getFieldIndex(Value *V) const {
331 auto Itr = FieldIndexMap.find(V);
332 assert(Itr != FieldIndexMap.end() &&
333 "Value does not have a frame field index");
334 return Itr->second;
335 }
336
  void setFieldIndex(Value *V, uint32_t Index) {
338 assert((LayoutIndexUpdateStarted || FieldIndexMap.count(V) == 0) &&
339 "Cannot set the index for the same field twice.");
340 FieldIndexMap[V] = Index;
341 }
342
  Align getAlign(Value *V) const {
344 auto Iter = FieldAlignMap.find(V);
345 assert(Iter != FieldAlignMap.end());
346 return Iter->second;
347 }
348
  void setAlign(Value *V, Align AL) {
350 assert(FieldAlignMap.count(V) == 0);
351 FieldAlignMap.insert({V, AL});
352 }
353
  uint64_t getDynamicAlign(Value *V) const {
355 auto Iter = FieldDynamicAlignMap.find(V);
356 assert(Iter != FieldDynamicAlignMap.end());
357 return Iter->second;
358 }
359
  void setDynamicAlign(Value *V, uint64_t Align) {
361 assert(FieldDynamicAlignMap.count(V) == 0);
362 FieldDynamicAlignMap.insert({V, Align});
363 }
364
  uint64_t getOffset(Value *V) const {
366 auto Iter = FieldOffsetMap.find(V);
367 assert(Iter != FieldOffsetMap.end());
368 return Iter->second;
369 }
370
  void setOffset(Value *V, uint64_t Offset) {
372 assert(FieldOffsetMap.count(V) == 0);
373 FieldOffsetMap.insert({V, Offset});
374 }
375
376 // Remap the index of every field in the frame, using the final layout index.
377 void updateLayoutIndex(FrameTypeBuilder &B);
378
379 private:
380 // LayoutIndexUpdateStarted is used to avoid updating the index of any field
381 // twice by mistake.
382 bool LayoutIndexUpdateStarted = false;
383 // Map from values to their slot indexes on the frame. They will be first set
384 // with their original insertion field index. After the frame is built, their
385 // indexes will be updated into the final layout index.
386 DenseMap<Value *, uint32_t> FieldIndexMap;
387 // Map from values to their alignment on the frame. They would be set after
388 // the frame is built.
389 DenseMap<Value *, Align> FieldAlignMap;
390 DenseMap<Value *, uint64_t> FieldDynamicAlignMap;
391 // Map from values to their offset on the frame. They would be set after
392 // the frame is built.
393 DenseMap<Value *, uint64_t> FieldOffsetMap;
394 };
395 } // namespace
396
397 #ifndef NDEBUG
static void dumpSpills(StringRef Title, const SpillInfo &Spills) {
399 dbgs() << "------------- " << Title << "--------------\n";
400 for (const auto &E : Spills) {
401 E.first->dump();
402 dbgs() << " user: ";
403 for (auto *I : E.second)
404 I->dump();
405 }
406 }
407
static void dumpAllocas(const SmallVectorImpl<AllocaInfo> &Allocas) {
409 dbgs() << "------------- Allocas --------------\n";
410 for (const auto &A : Allocas) {
411 A.Alloca->dump();
412 }
413 }
414 #endif
415
416 namespace {
417 using FieldIDType = size_t;
// We cannot rely solely on the natural alignment of a type when building a
// coroutine frame: if the alignment specified on the Alloca instruction
// differs from the natural alignment of the allocated type, we will need to
// insert padding.
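// As a hypothetical example: an `alloca i64, align 16` has an ABI type
// alignment of only 8 on a typical 64-bit target, so honoring the requested
// 16-byte alignment inside the frame may force the field to a 16-byte-aligned
// offset with explicit padding bytes in front of it.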
422 class FrameTypeBuilder {
423 private:
424 struct Field {
425 uint64_t Size;
426 uint64_t Offset;
427 Type *Ty;
428 FieldIDType LayoutFieldIndex;
429 Align Alignment;
430 Align TyAlignment;
431 uint64_t DynamicAlignBuffer;
432 };
433
434 const DataLayout &DL;
435 LLVMContext &Context;
436 uint64_t StructSize = 0;
437 Align StructAlign;
438 bool IsFinished = false;
439
440 Optional<Align> MaxFrameAlignment;
441
442 SmallVector<Field, 8> Fields;
443 DenseMap<Value*, unsigned> FieldIndexByKey;
444
445 public:
  FrameTypeBuilder(LLVMContext &Context, const DataLayout &DL,
447 Optional<Align> MaxFrameAlignment)
448 : DL(DL), Context(Context), MaxFrameAlignment(MaxFrameAlignment) {}
449
450 /// Add a field to this structure for the storage of an `alloca`
451 /// instruction.
  LLVM_NODISCARD FieldIDType addFieldForAlloca(AllocaInst *AI,
453 bool IsHeader = false) {
454 Type *Ty = AI->getAllocatedType();
455
456 // Make an array type if this is a static array allocation.
457 if (AI->isArrayAllocation()) {
458 if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
459 Ty = ArrayType::get(Ty, CI->getValue().getZExtValue());
460 else
461 report_fatal_error("Coroutines cannot handle non static allocas yet");
462 }
463
464 return addField(Ty, AI->getAlign(), IsHeader);
465 }
466
  /// We want to put allocas whose lifetime ranges do not overlap
  /// into one slot of the coroutine frame.
  /// Consider the example at https://bugs.llvm.org/show_bug.cgi?id=45566:
470 ///
471 /// cppcoro::task<void> alternative_paths(bool cond) {
472 /// if (cond) {
473 /// big_structure a;
474 /// process(a);
475 /// co_await something();
476 /// } else {
477 /// big_structure b;
478 /// process2(b);
479 /// co_await something();
480 /// }
481 /// }
482 ///
483 /// We want to put variable a and variable b in the same slot to
484 /// reduce the size of coroutine frame.
485 ///
  /// This function uses the StackLifetime algorithm to partition the
  /// AllocaInsts in FrameData into non-overlapping sets, so that allocas in
  /// the same non-overlapping set can share one slot in the coroutine frame.
  /// It then adds a field for each non-overlapping set, using the largest
  /// type in the set as the field type.
  ///
  /// Side effect: because we sort the allocas, the order of allocas in the
  /// frame may differ from their order in the source code.
494 void addFieldForAllocas(const Function &F, FrameDataInfo &FrameData,
495 coro::Shape &Shape);
496
497 /// Add a field to this structure.
  LLVM_NODISCARD FieldIDType addField(Type *Ty, MaybeAlign MaybeFieldAlignment,
499 bool IsHeader = false,
500 bool IsSpillOfValue = false) {
501 assert(!IsFinished && "adding fields to a finished builder");
502 assert(Ty && "must provide a type for a field");
503
504 // The field size is always the alloc size of the type.
505 uint64_t FieldSize = DL.getTypeAllocSize(Ty);
506
    // For an alloca with size = 0, we don't need to add a field; it can
    // just point to any index in the frame. Use index 0.
509 if (FieldSize == 0) {
510 return 0;
511 }
512
513 // The field alignment might not be the type alignment, but we need
514 // to remember the type alignment anyway to build the type.
515 // If we are spilling values we don't need to worry about ABI alignment
516 // concerns.
517 Align ABIAlign = DL.getABITypeAlign(Ty);
518 Align TyAlignment = ABIAlign;
519 if (IsSpillOfValue && MaxFrameAlignment && *MaxFrameAlignment < ABIAlign)
520 TyAlignment = *MaxFrameAlignment;
521 Align FieldAlignment = MaybeFieldAlignment.value_or(TyAlignment);
522
    // The field alignment could be bigger than the max frame alignment; in
    // that case we request additional storage to be able to dynamically
    // align the pointer.
526 uint64_t DynamicAlignBuffer = 0;
527 if (MaxFrameAlignment && (FieldAlignment > *MaxFrameAlignment)) {
528 DynamicAlignBuffer =
529 offsetToAlignment(MaxFrameAlignment->value(), FieldAlignment);
530 FieldAlignment = *MaxFrameAlignment;
531 FieldSize = FieldSize + DynamicAlignBuffer;
532 }
533
534 // Lay out header fields immediately.
535 uint64_t Offset;
536 if (IsHeader) {
537 Offset = alignTo(StructSize, FieldAlignment);
538 StructSize = Offset + FieldSize;
539
540 // Everything else has a flexible offset.
541 } else {
542 Offset = OptimizedStructLayoutField::FlexibleOffset;
543 }
544
545 Fields.push_back({FieldSize, Offset, Ty, 0, FieldAlignment, TyAlignment,
546 DynamicAlignBuffer});
547 return Fields.size() - 1;
548 }
549
550 /// Finish the layout and set the body on the given type.
551 void finish(StructType *Ty);
552
  uint64_t getStructSize() const {
554 assert(IsFinished && "not yet finished!");
555 return StructSize;
556 }
557
  Align getStructAlign() const {
559 assert(IsFinished && "not yet finished!");
560 return StructAlign;
561 }
562
  FieldIDType getLayoutFieldIndex(FieldIDType Id) const {
564 assert(IsFinished && "not yet finished!");
565 return Fields[Id].LayoutFieldIndex;
566 }
567
  Field getLayoutField(FieldIDType Id) const {
569 assert(IsFinished && "not yet finished!");
570 return Fields[Id];
571 }
572 };
573 } // namespace
574
void FrameDataInfo::updateLayoutIndex(FrameTypeBuilder &B) {
576 auto Updater = [&](Value *I) {
577 auto Field = B.getLayoutField(getFieldIndex(I));
578 setFieldIndex(I, Field.LayoutFieldIndex);
579 setAlign(I, Field.Alignment);
580 uint64_t dynamicAlign =
581 Field.DynamicAlignBuffer
582 ? Field.DynamicAlignBuffer + Field.Alignment.value()
583 : 0;
584 setDynamicAlign(I, dynamicAlign);
585 setOffset(I, Field.Offset);
586 };
587 LayoutIndexUpdateStarted = true;
588 for (auto &S : Spills)
589 Updater(S.first);
590 for (const auto &A : Allocas)
591 Updater(A.Alloca);
592 LayoutIndexUpdateStarted = false;
593 }
594
void FrameTypeBuilder::addFieldForAllocas(const Function &F,
596 FrameDataInfo &FrameData,
597 coro::Shape &Shape) {
598 using AllocaSetType = SmallVector<AllocaInst *, 4>;
599 SmallVector<AllocaSetType, 4> NonOverlapedAllocas;
600
601 // We need to add field for allocas at the end of this function.
602 auto AddFieldForAllocasAtExit = make_scope_exit([&]() {
603 for (auto AllocaList : NonOverlapedAllocas) {
604 auto *LargestAI = *AllocaList.begin();
605 FieldIDType Id = addFieldForAlloca(LargestAI);
606 for (auto *Alloca : AllocaList)
607 FrameData.setFieldIndex(Alloca, Id);
608 }
609 });
610
611 if (!Shape.OptimizeFrame) {
612 for (const auto &A : FrameData.Allocas) {
613 AllocaInst *Alloca = A.Alloca;
614 NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
615 }
616 return;
617 }
618
  // Because there are paths from the lifetime.start to coro.end
  // for each alloca, the live ranges of all allocas overlap
  // in the blocks that contain coro.end and their successor blocks.
  // So we choose to skip these blocks when we calculate the live range
  // for each alloca. This should be reasonable since there shouldn't be uses
  // in these blocks and the coroutine frame shouldn't be used outside the
  // coroutine body.
  //
  // Note that the user of coro.suspend may not be a SwitchInst. However, this
  // case seems too complex to handle. And it is harmless to skip these
  // patterns since it merely prevents putting the allocas into the same
  // slot.
631 DenseMap<SwitchInst *, BasicBlock *> DefaultSuspendDest;
632 for (auto CoroSuspendInst : Shape.CoroSuspends) {
633 for (auto U : CoroSuspendInst->users()) {
634 if (auto *ConstSWI = dyn_cast<SwitchInst>(U)) {
635 auto *SWI = const_cast<SwitchInst *>(ConstSWI);
636 DefaultSuspendDest[SWI] = SWI->getDefaultDest();
637 SWI->setDefaultDest(SWI->getSuccessor(1));
638 }
639 }
640 }
641
642 auto ExtractAllocas = [&]() {
643 AllocaSetType Allocas;
644 Allocas.reserve(FrameData.Allocas.size());
645 for (const auto &A : FrameData.Allocas)
646 Allocas.push_back(A.Alloca);
647 return Allocas;
648 };
649 StackLifetime StackLifetimeAnalyzer(F, ExtractAllocas(),
650 StackLifetime::LivenessType::May);
651 StackLifetimeAnalyzer.run();
652 auto IsAllocaInferenre = [&](const AllocaInst *AI1, const AllocaInst *AI2) {
653 return StackLifetimeAnalyzer.getLiveRange(AI1).overlaps(
654 StackLifetimeAnalyzer.getLiveRange(AI2));
655 };
656 auto GetAllocaSize = [&](const AllocaInfo &A) {
657 Optional<TypeSize> RetSize = A.Alloca->getAllocationSizeInBits(DL);
658 assert(RetSize && "Variable Length Arrays (VLA) are not supported.\n");
659 assert(!RetSize->isScalable() && "Scalable vectors are not yet supported");
660 return RetSize->getFixedSize();
661 };
  // Put larger allocas in the front, so that larger allocas have higher
  // priority to merge, which can potentially save more space. Also, each
  // AllocaSet is then ordered, so we can easily get the largest Alloca in
  // an AllocaSet.
666 sort(FrameData.Allocas, [&](const auto &Iter1, const auto &Iter2) {
667 return GetAllocaSize(Iter1) > GetAllocaSize(Iter2);
668 });
669 for (const auto &A : FrameData.Allocas) {
670 AllocaInst *Alloca = A.Alloca;
671 bool Merged = false;
    // Try to find whether the Alloca does not interfere with any existing
    // NonOverlappedAllocaSet. If so, insert the alloca into that
    // NonOverlappedAllocaSet.
675 for (auto &AllocaSet : NonOverlapedAllocas) {
676 assert(!AllocaSet.empty() && "Processing Alloca Set is not empty.\n");
677 bool NoInference = none_of(AllocaSet, [&](auto Iter) {
678 return IsAllocaInferenre(Alloca, Iter);
679 });
      // If the alignment of A is a multiple of the alignment of B, the
      // address of A satisfies the alignment requirement of B.
      //
      // There may be other, more fine-grained strategies to handle the
      // alignment information during the merging process, but they seem hard
      // to implement and of little benefit.
686 bool Alignable = [&]() -> bool {
687 auto *LargestAlloca = *AllocaSet.begin();
688 return LargestAlloca->getAlign().value() % Alloca->getAlign().value() ==
689 0;
690 }();
691 bool CouldMerge = NoInference && Alignable;
692 if (!CouldMerge)
693 continue;
694 AllocaSet.push_back(Alloca);
695 Merged = true;
696 break;
697 }
698 if (!Merged) {
699 NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
700 }
701 }
  // Restore the default target destination for each switch statement that we
  // saved earlier.
704 for (auto SwitchAndDefaultDest : DefaultSuspendDest) {
705 SwitchInst *SWI = SwitchAndDefaultDest.first;
706 BasicBlock *DestBB = SwitchAndDefaultDest.second;
707 SWI->setDefaultDest(DestBB);
708 }
709 // This Debug Info could tell us which allocas are merged into one slot.
710 LLVM_DEBUG(for (auto &AllocaSet
711 : NonOverlapedAllocas) {
712 if (AllocaSet.size() > 1) {
713 dbgs() << "In Function:" << F.getName() << "\n";
714 dbgs() << "Find Union Set "
715 << "\n";
716 dbgs() << "\tAllocas are \n";
717 for (auto Alloca : AllocaSet)
718 dbgs() << "\t\t" << *Alloca << "\n";
719 }
720 });
721 }
722
void FrameTypeBuilder::finish(StructType *Ty) {
724 assert(!IsFinished && "already finished!");
725
726 // Prepare the optimal-layout field array.
727 // The Id in the layout field is a pointer to our Field for it.
728 SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
729 LayoutFields.reserve(Fields.size());
730 for (auto &Field : Fields) {
731 LayoutFields.emplace_back(&Field, Field.Size, Field.Alignment,
732 Field.Offset);
733 }
734
735 // Perform layout.
736 auto SizeAndAlign = performOptimizedStructLayout(LayoutFields);
737 StructSize = SizeAndAlign.first;
738 StructAlign = SizeAndAlign.second;
739
740 auto getField = [](const OptimizedStructLayoutField &LayoutField) -> Field & {
741 return *static_cast<Field *>(const_cast<void*>(LayoutField.Id));
742 };
743
744 // We need to produce a packed struct type if there's a field whose
745 // assigned offset isn't a multiple of its natural type alignment.
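  // For example (illustrative numbers only): if the optimized layout assigns
  // a field whose type has natural alignment 8 to offset 4, a non-packed
  // struct would bump that field to offset 8, so we emit a packed struct and
  // model all padding explicitly with i8 array fields below.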
746 bool Packed = [&] {
747 for (auto &LayoutField : LayoutFields) {
748 auto &F = getField(LayoutField);
749 if (!isAligned(F.TyAlignment, LayoutField.Offset))
750 return true;
751 }
752 return false;
753 }();
754
755 // Build the struct body.
756 SmallVector<Type*, 16> FieldTypes;
757 FieldTypes.reserve(LayoutFields.size() * 3 / 2);
758 uint64_t LastOffset = 0;
759 for (auto &LayoutField : LayoutFields) {
760 auto &F = getField(LayoutField);
761
762 auto Offset = LayoutField.Offset;
763
764 // Add a padding field if there's a padding gap and we're either
765 // building a packed struct or the padding gap is more than we'd
766 // get from aligning to the field type's natural alignment.
767 assert(Offset >= LastOffset);
768 if (Offset != LastOffset) {
769 if (Packed || alignTo(LastOffset, F.TyAlignment) != Offset)
770 FieldTypes.push_back(ArrayType::get(Type::getInt8Ty(Context),
771 Offset - LastOffset));
772 }
773
774 F.Offset = Offset;
775 F.LayoutFieldIndex = FieldTypes.size();
776
777 FieldTypes.push_back(F.Ty);
778 if (F.DynamicAlignBuffer) {
779 FieldTypes.push_back(
780 ArrayType::get(Type::getInt8Ty(Context), F.DynamicAlignBuffer));
781 }
782 LastOffset = Offset + F.Size;
783 }
784
785 Ty->setBody(FieldTypes, Packed);
786
787 #ifndef NDEBUG
788 // Check that the IR layout matches the offsets we expect.
789 auto Layout = DL.getStructLayout(Ty);
790 for (auto &F : Fields) {
791 assert(Ty->getElementType(F.LayoutFieldIndex) == F.Ty);
792 assert(Layout->getElementOffset(F.LayoutFieldIndex) == F.Offset);
793 }
794 #endif
795
796 IsFinished = true;
797 }
798
static void cacheDIVar(FrameDataInfo &FrameData,
800 DenseMap<Value *, DILocalVariable *> &DIVarCache) {
801 for (auto *V : FrameData.getAllDefs()) {
802 if (DIVarCache.find(V) != DIVarCache.end())
803 continue;
804
805 auto DDIs = FindDbgDeclareUses(V);
806 auto *I = llvm::find_if(DDIs, [](DbgDeclareInst *DDI) {
807 return DDI->getExpression()->getNumElements() == 0;
808 });
809 if (I != DDIs.end())
810 DIVarCache.insert({V, (*I)->getVariable()});
811 }
812 }
813
/// Create a name for the Type. It uses MDString to store the newly created
/// string to avoid a memory leak.
static StringRef solveTypeName(Type *Ty) {
817 if (Ty->isIntegerTy()) {
    // The longest common name may be '__int_128', which has 9 characters.
819 SmallString<16> Buffer;
820 raw_svector_ostream OS(Buffer);
821 OS << "__int_" << cast<IntegerType>(Ty)->getBitWidth();
822 auto *MDName = MDString::get(Ty->getContext(), OS.str());
823 return MDName->getString();
824 }
825
826 if (Ty->isFloatingPointTy()) {
827 if (Ty->isFloatTy())
828 return "__float_";
829 if (Ty->isDoubleTy())
830 return "__double_";
831 return "__floating_type_";
832 }
833
834 if (auto *PtrTy = dyn_cast<PointerType>(Ty)) {
835 if (PtrTy->isOpaque())
836 return "PointerType";
837 Type *PointeeTy = PtrTy->getNonOpaquePointerElementType();
838 auto Name = solveTypeName(PointeeTy);
839 if (Name == "UnknownType")
840 return "PointerType";
841 SmallString<16> Buffer;
842 Twine(Name + "_Ptr").toStringRef(Buffer);
843 auto *MDName = MDString::get(Ty->getContext(), Buffer.str());
844 return MDName->getString();
845 }
846
847 if (Ty->isStructTy()) {
848 if (!cast<StructType>(Ty)->hasName())
849 return "__LiteralStructType_";
850
851 auto Name = Ty->getStructName();
852
853 SmallString<16> Buffer(Name);
854 for (auto &Iter : Buffer)
855 if (Iter == '.' || Iter == ':')
856 Iter = '_';
857 auto *MDName = MDString::get(Ty->getContext(), Buffer.str());
858 return MDName->getString();
859 }
860
861 return "UnknownType";
862 }
863
static DIType *solveDIType(DIBuilder &Builder, Type *Ty,
865 const DataLayout &Layout, DIScope *Scope,
866 unsigned LineNum,
867 DenseMap<Type *, DIType *> &DITypeCache) {
868 if (DIType *DT = DITypeCache.lookup(Ty))
869 return DT;
870
871 StringRef Name = solveTypeName(Ty);
872
873 DIType *RetType = nullptr;
874
875 if (Ty->isIntegerTy()) {
876 auto BitWidth = cast<IntegerType>(Ty)->getBitWidth();
877 RetType = Builder.createBasicType(Name, BitWidth, dwarf::DW_ATE_signed,
878 llvm::DINode::FlagArtificial);
879 } else if (Ty->isFloatingPointTy()) {
880 RetType = Builder.createBasicType(Name, Layout.getTypeSizeInBits(Ty),
881 dwarf::DW_ATE_float,
882 llvm::DINode::FlagArtificial);
883 } else if (Ty->isPointerTy()) {
    // Construct the PointerType as pointing to null (i.e. void *) instead of
    // exploring the pointee type, to avoid an infinite search problem. For
    // example, we would be in trouble if we traversed recursively:
887 //
888 // struct Node {
889 // Node* ptr;
890 // };
891 RetType = Builder.createPointerType(nullptr, Layout.getTypeSizeInBits(Ty),
892 Layout.getABITypeAlignment(Ty),
893 /*DWARFAddressSpace=*/None, Name);
894 } else if (Ty->isStructTy()) {
895 auto *DIStruct = Builder.createStructType(
896 Scope, Name, Scope->getFile(), LineNum, Layout.getTypeSizeInBits(Ty),
897 Layout.getPrefTypeAlignment(Ty), llvm::DINode::FlagArtificial, nullptr,
898 llvm::DINodeArray());
899
900 auto *StructTy = cast<StructType>(Ty);
901 SmallVector<Metadata *, 16> Elements;
902 for (unsigned I = 0; I < StructTy->getNumElements(); I++) {
903 DIType *DITy = solveDIType(Builder, StructTy->getElementType(I), Layout,
904 Scope, LineNum, DITypeCache);
905 assert(DITy);
906 Elements.push_back(Builder.createMemberType(
907 Scope, DITy->getName(), Scope->getFile(), LineNum,
908 DITy->getSizeInBits(), DITy->getAlignInBits(),
909 Layout.getStructLayout(StructTy)->getElementOffsetInBits(I),
910 llvm::DINode::FlagArtificial, DITy));
911 }
912
913 Builder.replaceArrays(DIStruct, Builder.getOrCreateArray(Elements));
914
915 RetType = DIStruct;
916 } else {
917 LLVM_DEBUG(dbgs() << "Unresolved Type: " << *Ty << "\n");
918 TypeSize Size = Layout.getTypeSizeInBits(Ty);
919 auto *CharSizeType = Builder.createBasicType(
920 Name, 8, dwarf::DW_ATE_unsigned_char, llvm::DINode::FlagArtificial);
921
922 if (Size <= 8)
923 RetType = CharSizeType;
924 else {
925 if (Size % 8 != 0)
926 Size = TypeSize::Fixed(Size + 8 - (Size % 8));
927
928 RetType = Builder.createArrayType(
929 Size, Layout.getPrefTypeAlign(Ty).value(), CharSizeType,
930 Builder.getOrCreateArray(Builder.getOrCreateSubrange(0, Size / 8)));
931 }
932 }
933
934 DITypeCache.insert({Ty, RetType});
935 return RetType;
936 }
937
938 /// Build artificial debug info for C++ coroutine frames to allow users to
939 /// inspect the contents of the frame directly
940 ///
/// Create debug information for the coroutine frame with the debug name
/// "__coro_frame". The debug information for the fields of the coroutine
/// frame is constructed in the following way:
/// 1. For every value in the frame, we search the uses of dbg.declare to find
///    the corresponding debug variable for the value. If we can find the
///    debug variable, we get full and accurate debug information.
/// 2. If we can't get debug information in step 1, we try to build a DIType
///    from the value's Type; we do this in solveDIType. We only handle
///    integer, floating-point, pointer, and struct types for now.
static void buildFrameDebugInfo(Function &F, coro::Shape &Shape,
951 FrameDataInfo &FrameData) {
952 DISubprogram *DIS = F.getSubprogram();
  // If there is no DISubprogram for F, it implies the function was not
  // compiled with debug info, so we don't need to generate debug info for
  // the frame either.
956 if (!DIS || !DIS->getUnit() ||
957 !dwarf::isCPlusPlus(
958 (dwarf::SourceLanguage)DIS->getUnit()->getSourceLanguage()))
959 return;
960
961 assert(Shape.ABI == coro::ABI::Switch &&
962 "We could only build debug infomation for C++ coroutine now.\n");
963
964 DIBuilder DBuilder(*F.getParent(), /*AllowUnresolved*/ false);
965
966 AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
967 assert(PromiseAlloca &&
968 "Coroutine with switch ABI should own Promise alloca");
969
970 TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(PromiseAlloca);
971 if (DIs.empty())
972 return;
973
974 DbgDeclareInst *PromiseDDI = DIs.front();
975 DILocalVariable *PromiseDIVariable = PromiseDDI->getVariable();
976 DILocalScope *PromiseDIScope = PromiseDIVariable->getScope();
977 DIFile *DFile = PromiseDIScope->getFile();
978 DILocation *DILoc = PromiseDDI->getDebugLoc().get();
979 unsigned LineNum = PromiseDIVariable->getLine();
980
981 DICompositeType *FrameDITy = DBuilder.createStructType(
982 DIS->getUnit(), Twine(F.getName() + ".coro_frame_ty").str(),
983 DFile, LineNum, Shape.FrameSize * 8,
984 Shape.FrameAlign.value() * 8, llvm::DINode::FlagArtificial, nullptr,
985 llvm::DINodeArray());
986 StructType *FrameTy = Shape.FrameTy;
987 SmallVector<Metadata *, 16> Elements;
988 DataLayout Layout = F.getParent()->getDataLayout();
989
990 DenseMap<Value *, DILocalVariable *> DIVarCache;
991 cacheDIVar(FrameData, DIVarCache);
992
993 unsigned ResumeIndex = coro::Shape::SwitchFieldIndex::Resume;
994 unsigned DestroyIndex = coro::Shape::SwitchFieldIndex::Destroy;
995 unsigned IndexIndex = Shape.SwitchLowering.IndexField;
996
997 DenseMap<unsigned, StringRef> NameCache;
998 NameCache.insert({ResumeIndex, "__resume_fn"});
999 NameCache.insert({DestroyIndex, "__destroy_fn"});
1000 NameCache.insert({IndexIndex, "__coro_index"});
1001
1002 Type *ResumeFnTy = FrameTy->getElementType(ResumeIndex),
1003 *DestroyFnTy = FrameTy->getElementType(DestroyIndex),
1004 *IndexTy = FrameTy->getElementType(IndexIndex);
1005
1006 DenseMap<unsigned, DIType *> TyCache;
1007 TyCache.insert(
1008 {ResumeIndex, DBuilder.createPointerType(
1009 nullptr, Layout.getTypeSizeInBits(ResumeFnTy))});
1010 TyCache.insert(
1011 {DestroyIndex, DBuilder.createPointerType(
1012 nullptr, Layout.getTypeSizeInBits(DestroyFnTy))});
1013
1014 /// FIXME: If we fill the field `SizeInBits` with the actual size of
1015 /// __coro_index in bits, then __coro_index wouldn't show in the debugger.
1016 TyCache.insert({IndexIndex, DBuilder.createBasicType(
1017 "__coro_index",
1018 (Layout.getTypeSizeInBits(IndexTy) < 8)
1019 ? 8
1020 : Layout.getTypeSizeInBits(IndexTy),
1021 dwarf::DW_ATE_unsigned_char)});
1022
1023 for (auto *V : FrameData.getAllDefs()) {
1024 if (DIVarCache.find(V) == DIVarCache.end())
1025 continue;
1026
1027 auto Index = FrameData.getFieldIndex(V);
1028
1029 NameCache.insert({Index, DIVarCache[V]->getName()});
1030 TyCache.insert({Index, DIVarCache[V]->getType()});
1031 }
1032
1033 // Cache from index to (Align, Offset Pair)
1034 DenseMap<unsigned, std::pair<unsigned, unsigned>> OffsetCache;
1035 // The Align and Offset of Resume function and Destroy function are fixed.
1036 OffsetCache.insert({ResumeIndex, {8, 0}});
1037 OffsetCache.insert({DestroyIndex, {8, 8}});
1038 OffsetCache.insert(
1039 {IndexIndex,
1040 {Shape.SwitchLowering.IndexAlign, Shape.SwitchLowering.IndexOffset}});
1041
1042 for (auto *V : FrameData.getAllDefs()) {
1043 auto Index = FrameData.getFieldIndex(V);
1044
1045 OffsetCache.insert(
1046 {Index, {FrameData.getAlign(V).value(), FrameData.getOffset(V)}});
1047 }
1048
1049 DenseMap<Type *, DIType *> DITypeCache;
  // This counter is used to avoid duplicate type names, e.g., there may be
  // many i32 and i64 types in one coroutine, and we would use i32_0 and
  // i32_1 to distinguish them, since it makes no sense for the names of the
  // fields to conflict with each other.
1054 unsigned UnknownTypeNum = 0;
1055 for (unsigned Index = 0; Index < FrameTy->getNumElements(); Index++) {
1056 if (OffsetCache.find(Index) == OffsetCache.end())
1057 continue;
1058
1059 std::string Name;
1060 uint64_t SizeInBits;
1061 uint32_t AlignInBits;
1062 uint64_t OffsetInBits;
1063 DIType *DITy = nullptr;
1064
1065 Type *Ty = FrameTy->getElementType(Index);
1066 assert(Ty->isSized() && "We can't handle type which is not sized.\n");
1067 SizeInBits = Layout.getTypeSizeInBits(Ty).getFixedSize();
1068 AlignInBits = OffsetCache[Index].first * 8;
1069 OffsetInBits = OffsetCache[Index].second * 8;
1070
1071 if (NameCache.find(Index) != NameCache.end()) {
1072 Name = NameCache[Index].str();
1073 DITy = TyCache[Index];
1074 } else {
1075 DITy = solveDIType(DBuilder, Ty, Layout, FrameDITy, LineNum, DITypeCache);
1076 assert(DITy && "SolveDIType shouldn't return nullptr.\n");
1077 Name = DITy->getName().str();
1078 Name += "_" + std::to_string(UnknownTypeNum);
1079 UnknownTypeNum++;
1080 }
1081
1082 Elements.push_back(DBuilder.createMemberType(
1083 FrameDITy, Name, DFile, LineNum, SizeInBits, AlignInBits, OffsetInBits,
1084 llvm::DINode::FlagArtificial, DITy));
1085 }
1086
1087 DBuilder.replaceArrays(FrameDITy, DBuilder.getOrCreateArray(Elements));
1088
1089 auto *FrameDIVar = DBuilder.createAutoVariable(PromiseDIScope, "__coro_frame",
1090 DFile, LineNum, FrameDITy,
1091 true, DINode::FlagArtificial);
1092 assert(FrameDIVar->isValidLocationForIntrinsic(PromiseDDI->getDebugLoc()));
1093
  // The Subprogram has a RetainedNodes field which records the debug
  // variables it contains. So we need to add __coro_frame to its
  // RetainedNodes.
  //
  // If we don't add __coro_frame to the RetainedNodes, the user may get
  // `no symbol __coro_frame in context` rather than the more precise
  // `__coro_frame` is optimized out.
1101 if (auto *SubProgram = dyn_cast<DISubprogram>(PromiseDIScope)) {
1102 auto RetainedNodes = SubProgram->getRetainedNodes();
1103 SmallVector<Metadata *, 32> RetainedNodesVec(RetainedNodes.begin(),
1104 RetainedNodes.end());
1105 RetainedNodesVec.push_back(FrameDIVar);
1106 SubProgram->replaceOperandWith(
1107 7, (MDTuple::get(F.getContext(), RetainedNodesVec)));
1108 }
1109
1110 DBuilder.insertDeclare(Shape.FramePtr, FrameDIVar,
1111 DBuilder.createExpression(), DILoc,
1112 Shape.getInsertPtAfterFramePtr());
1113 }
1114
1115 // Build a struct that will keep state for an active coroutine.
1116 // struct f.frame {
1117 // ResumeFnTy ResumeFnAddr;
1118 // ResumeFnTy DestroyFnAddr;
1119 // int ResumeIndex;
1120 // ... promise (if present) ...
1121 // ... spills ...
1122 // };
static StructType *buildFrameType(Function &F, coro::Shape &Shape,
1124 FrameDataInfo &FrameData) {
1125 LLVMContext &C = F.getContext();
1126 const DataLayout &DL = F.getParent()->getDataLayout();
1127 StructType *FrameTy = [&] {
1128 SmallString<32> Name(F.getName());
1129 Name.append(".Frame");
1130 return StructType::create(C, Name);
1131 }();
1132
1133 // We will use this value to cap the alignment of spilled values.
1134 Optional<Align> MaxFrameAlignment;
1135 if (Shape.ABI == coro::ABI::Async)
1136 MaxFrameAlignment = Shape.AsyncLowering.getContextAlignment();
1137 FrameTypeBuilder B(C, DL, MaxFrameAlignment);
1138
1139 AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
1140 Optional<FieldIDType> SwitchIndexFieldId;
1141
1142 if (Shape.ABI == coro::ABI::Switch) {
1143 auto *FramePtrTy = FrameTy->getPointerTo();
1144 auto *FnTy = FunctionType::get(Type::getVoidTy(C), FramePtrTy,
1145 /*IsVarArg=*/false);
1146 auto *FnPtrTy = FnTy->getPointerTo();
1147
1148 // Add header fields for the resume and destroy functions.
1149 // We can rely on these being perfectly packed.
1150 (void)B.addField(FnPtrTy, None, /*header*/ true);
1151 (void)B.addField(FnPtrTy, None, /*header*/ true);
1152
1153 // PromiseAlloca field needs to be explicitly added here because it's
1154 // a header field with a fixed offset based on its alignment. Hence it
1155 // needs special handling and cannot be added to FrameData.Allocas.
1156 if (PromiseAlloca)
1157 FrameData.setFieldIndex(
1158 PromiseAlloca, B.addFieldForAlloca(PromiseAlloca, /*header*/ true));
1159
1160 // Add a field to store the suspend index. This doesn't need to
1161 // be in the header.
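    // For instance, a coroutine with three suspend points needs
    // ceil(log2(3)) = 2 index bits, and one with a single suspend point still
    // gets at least 1 bit.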
1162 unsigned IndexBits = std::max(1U, Log2_64_Ceil(Shape.CoroSuspends.size()));
1163 Type *IndexType = Type::getIntNTy(C, IndexBits);
1164
1165 SwitchIndexFieldId = B.addField(IndexType, None);
1166 } else {
1167 assert(PromiseAlloca == nullptr && "lowering doesn't support promises");
1168 }
1169
1170 // Because multiple allocas may own the same field slot,
1171 // we add allocas to field here.
1172 B.addFieldForAllocas(F, FrameData, Shape);
1173 // Add PromiseAlloca to Allocas list so that
1174 // 1. updateLayoutIndex could update its index after
1175 // `performOptimizedStructLayout`
1176 // 2. it is processed in insertSpills.
1177 if (Shape.ABI == coro::ABI::Switch && PromiseAlloca)
    // We assume that the promise alloca won't be modified before CoroBegin
    // and that no alias will be created before CoroBegin.
1180 FrameData.Allocas.emplace_back(
1181 PromiseAlloca, DenseMap<Instruction *, llvm::Optional<APInt>>{}, false);
1182 // Create an entry for every spilled value.
1183 for (auto &S : FrameData.Spills) {
1184 Type *FieldType = S.first->getType();
1185 // For byval arguments, we need to store the pointed value in the frame,
1186 // instead of the pointer itself.
1187 if (const Argument *A = dyn_cast<Argument>(S.first))
1188 if (A->hasByValAttr())
1189 FieldType = A->getParamByValType();
1190 FieldIDType Id =
1191 B.addField(FieldType, None, false /*header*/, true /*IsSpillOfValue*/);
1192 FrameData.setFieldIndex(S.first, Id);
1193 }
1194
1195 B.finish(FrameTy);
1196 FrameData.updateLayoutIndex(B);
1197 Shape.FrameAlign = B.getStructAlign();
1198 Shape.FrameSize = B.getStructSize();
1199
1200 switch (Shape.ABI) {
1201 case coro::ABI::Switch: {
1202 // In the switch ABI, remember the switch-index field.
1203 auto IndexField = B.getLayoutField(*SwitchIndexFieldId);
1204 Shape.SwitchLowering.IndexField = IndexField.LayoutFieldIndex;
1205 Shape.SwitchLowering.IndexAlign = IndexField.Alignment.value();
1206 Shape.SwitchLowering.IndexOffset = IndexField.Offset;
1207
1208 // Also round the frame size up to a multiple of its alignment, as is
1209 // generally expected in C/C++.
1210 Shape.FrameSize = alignTo(Shape.FrameSize, Shape.FrameAlign);
1211 break;
1212 }
1213
1214 // In the retcon ABI, remember whether the frame is inline in the storage.
1215 case coro::ABI::Retcon:
1216 case coro::ABI::RetconOnce: {
1217 auto Id = Shape.getRetconCoroId();
1218 Shape.RetconLowering.IsFrameInlineInStorage
1219 = (B.getStructSize() <= Id->getStorageSize() &&
1220 B.getStructAlign() <= Id->getStorageAlignment());
1221 break;
1222 }
1223 case coro::ABI::Async: {
1224 Shape.AsyncLowering.FrameOffset =
1225 alignTo(Shape.AsyncLowering.ContextHeaderSize, Shape.FrameAlign);
1226 // Also make the final context size a multiple of the context alignment to
1227 // make allocation easier for allocators.
1228 Shape.AsyncLowering.ContextSize =
1229 alignTo(Shape.AsyncLowering.FrameOffset + Shape.FrameSize,
1230 Shape.AsyncLowering.getContextAlignment());
1231 if (Shape.AsyncLowering.getContextAlignment() < Shape.FrameAlign) {
1232 report_fatal_error(
1233 "The alignment requirment of frame variables cannot be higher than "
1234 "the alignment of the async function context");
1235 }
1236 break;
1237 }
1238 }
1239
1240 return FrameTy;
1241 }
1242
// We use a pointer use visitor to track how an alloca is being used.
// The goal is to be able to answer the following three questions:
// 1. Should this alloca be allocated on the frame instead?
// 2. Could the content of the alloca be modified prior to CoroBegin, which
//    would require copying the data from the alloca to the frame after
//    CoroBegin?
// 3. Is there any alias created for this alloca prior to CoroBegin, but used
//    after CoroBegin? In that case, we will need to recreate the alias after
//    CoroBegin based off the frame.
//
// To answer question 1, we track two things:
//   a. The list of all BasicBlocks that use this alloca or any alias of it.
//      In the end, we check if there exist any two basic blocks that cross
//      suspension points. If so, this alloca must be put on the frame.
//   b. Whether the alloca or any alias of the alloca escapes at some point,
//      either by storing the address somewhere, or because the address is
//      used in a function call that might capture. If it ever escapes, the
//      alloca must conservatively be put on the frame.
//
// To answer question 2, we track writes through the variable
// MayWriteBeforeCoroBegin: whenever a potential write happens, either through
// a store instruction, a function call or any of the memory intrinsics, we
// check whether the instruction is prior to CoroBegin.
//
// To answer question 3, we track the offsets of all aliases created for the
// alloca prior to CoroBegin but used after CoroBegin. llvm::Optional is used
// to represent the case when the offset is unknown (e.g. when a PHINode takes
// in different offset values). We cannot handle unknown offsets and will
// assert. This is the potential issue left out. An ideal solution would
// likely require a significant redesign.
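//
// A minimal sketch of question 3 (hand-written, not from a real test case):
//
//   %a     = alloca [64 x i8]
//   %alias = getelementptr [64 x i8], ptr %a, i64 0, i64 16 ; alias, offset 16
//   %hdl   = call ptr @llvm.coro.begin(...)
//   ...
//   call void @use(ptr %alias)        ; the alias is used after coro.begin
//
// Once %a is moved onto the frame, the visitor records {%alias -> offset 16}
// so that the alias can be recreated from the frame address after coro.begin.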
1268 namespace {
1269 struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
1270 using Base = PtrUseVisitor<AllocaUseVisitor>;
  AllocaUseVisitor(const DataLayout &DL, const DominatorTree &DT,
1272 const CoroBeginInst &CB, const SuspendCrossingInfo &Checker,
1273 bool ShouldUseLifetimeStartInfo)
1274 : PtrUseVisitor(DL), DT(DT), CoroBegin(CB), Checker(Checker),
1275 ShouldUseLifetimeStartInfo(ShouldUseLifetimeStartInfo) {}
1276
  void visit(Instruction &I) {
1278 Users.insert(&I);
1279 Base::visit(I);
1280 // If the pointer is escaped prior to CoroBegin, we have to assume it would
1281 // be written into before CoroBegin as well.
1282 if (PI.isEscaped() && !DT.dominates(&CoroBegin, PI.getEscapingInst())) {
1283 MayWriteBeforeCoroBegin = true;
1284 }
1285 }
1286 // We need to provide this overload as PtrUseVisitor uses a pointer based
1287 // visiting function.
  void visit(Instruction *I) { return visit(*I); }
1289
  void visitPHINode(PHINode &I) {
1291 enqueueUsers(I);
1292 handleAlias(I);
1293 }
1294
  void visitSelectInst(SelectInst &I) {
1296 enqueueUsers(I);
1297 handleAlias(I);
1298 }
1299
  void visitStoreInst(StoreInst &SI) {
    // Regardless of whether the alias of the alloca is the value operand or
    // the pointer operand, we need to assume the alloca has been written to.
1303 handleMayWrite(SI);
1304
1305 if (SI.getValueOperand() != U->get())
1306 return;
1307
1308 // We are storing the pointer into a memory location, potentially escaping.
1309 // As an optimization, we try to detect simple cases where it doesn't
1310 // actually escape, for example:
1311 // %ptr = alloca ..
1312 // %addr = alloca ..
1313 // store %ptr, %addr
1314 // %x = load %addr
1315 // ..
    // If %addr is only used by loading from it, we could simply treat %x as
    // another alias of %ptr, and not consider %ptr as escaped.
1318 auto IsSimpleStoreThenLoad = [&]() {
1319 auto *AI = dyn_cast<AllocaInst>(SI.getPointerOperand());
1320 // If the memory location we are storing to is not an alloca, it
1321 // could be an alias of some other memory locations, which is difficult
1322 // to analyze.
1323 if (!AI)
1324 return false;
1325 // StoreAliases contains aliases of the memory location stored into.
1326 SmallVector<Instruction *, 4> StoreAliases = {AI};
1327 while (!StoreAliases.empty()) {
1328 Instruction *I = StoreAliases.pop_back_val();
1329 for (User *U : I->users()) {
1330 // If we are loading from the memory location, we are creating an
1331 // alias of the original pointer.
1332 if (auto *LI = dyn_cast<LoadInst>(U)) {
1333 enqueueUsers(*LI);
1334 handleAlias(*LI);
1335 continue;
1336 }
          // If we are overwriting the memory location, the pointer certainly
1338 // won't escape.
1339 if (auto *S = dyn_cast<StoreInst>(U))
1340 if (S->getPointerOperand() == I)
1341 continue;
1342 if (auto *II = dyn_cast<IntrinsicInst>(U))
1343 if (II->isLifetimeStartOrEnd())
1344 continue;
          // BitCastInst creates aliases of the memory location being stored
1346 // into.
1347 if (auto *BI = dyn_cast<BitCastInst>(U)) {
1348 StoreAliases.push_back(BI);
1349 continue;
1350 }
1351 return false;
1352 }
1353 }
1354
1355 return true;
1356 };
1357
1358 if (!IsSimpleStoreThenLoad())
1359 PI.setEscaped(&SI);
1360 }
1361
1362 // All mem intrinsics modify the data.
  void visitMemIntrinsic(MemIntrinsic &MI) { handleMayWrite(MI); }
1364
  void visitBitCastInst(BitCastInst &BC) {
1366 Base::visitBitCastInst(BC);
1367 handleAlias(BC);
1368 }
1369
  void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
1371 Base::visitAddrSpaceCastInst(ASC);
1372 handleAlias(ASC);
1373 }
1374
  void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
1376 // The base visitor will adjust Offset accordingly.
1377 Base::visitGetElementPtrInst(GEPI);
1378 handleAlias(GEPI);
1379 }
1380
  void visitIntrinsicInst(IntrinsicInst &II) {
    // When the lifetime markers refer to a subrange of the original alloca,
    // ignore them to avoid misleading the analysis.
1385 if (II.getIntrinsicID() != Intrinsic::lifetime_start || !IsOffsetKnown ||
1386 !Offset.isZero())
1387 return Base::visitIntrinsicInst(II);
1388 LifetimeStarts.insert(&II);
1389 }
1390
  void visitCallBase(CallBase &CB) {
1392 for (unsigned Op = 0, OpCount = CB.arg_size(); Op < OpCount; ++Op)
1393 if (U->get() == CB.getArgOperand(Op) && !CB.doesNotCapture(Op))
1394 PI.setEscaped(&CB);
1395 handleMayWrite(CB);
1396 }
1397
  bool getShouldLiveOnFrame() const {
1399 if (!ShouldLiveOnFrame)
1400 ShouldLiveOnFrame = computeShouldLiveOnFrame();
1401 return *ShouldLiveOnFrame;
1402 }
1403
  bool getMayWriteBeforeCoroBegin() const { return MayWriteBeforeCoroBegin; }
1405
  DenseMap<Instruction *, llvm::Optional<APInt>> getAliasesCopy() const {
1407 assert(getShouldLiveOnFrame() && "This method should only be called if the "
1408 "alloca needs to live on the frame.");
1409 for (const auto &P : AliasOffetMap)
1410 if (!P.second)
1411 report_fatal_error("Unable to handle an alias with unknown offset "
1412 "created before CoroBegin.");
1413 return AliasOffetMap;
1414 }
1415
1416 private:
1417 const DominatorTree &DT;
1418 const CoroBeginInst &CoroBegin;
1419 const SuspendCrossingInfo &Checker;
1420 // All alias to the original AllocaInst, created before CoroBegin and used
1421 // after CoroBegin. Each entry contains the instruction and the offset in the
1422 // original Alloca. They need to be recreated after CoroBegin off the frame.
1423 DenseMap<Instruction *, llvm::Optional<APInt>> AliasOffetMap{};
1424 SmallPtrSet<Instruction *, 4> Users{};
1425 SmallPtrSet<IntrinsicInst *, 2> LifetimeStarts{};
1426 bool MayWriteBeforeCoroBegin{false};
1427 bool ShouldUseLifetimeStartInfo{true};
1428
1429 mutable llvm::Optional<bool> ShouldLiveOnFrame{};
1430
  bool computeShouldLiveOnFrame() const {
1432 // If lifetime information is available, we check it first since it's
1433 // more precise. We look at every pair of lifetime.start intrinsic and
1434 // every basic block that uses the pointer to see if they cross suspension
1435 // points. The uses cover both direct uses as well as indirect uses.
1436 if (ShouldUseLifetimeStartInfo && !LifetimeStarts.empty()) {
1437 for (auto *I : Users)
1438 for (auto *S : LifetimeStarts)
1439 if (Checker.isDefinitionAcrossSuspend(*S, I))
1440 return true;
1441 return false;
1442 }
1443 // FIXME: Ideally the isEscaped check should come at the beginning.
1444 // However there are a few loose ends that need to be fixed first before
1445 // we can do that. We need to make sure we are not over-conservative, so
1446 // that the data accessed in-between await_suspend and symmetric transfer
1447 // is always put on the stack, and also data accessed after coro.end is
1448 // always put on the stack (esp the return object). To fix that, we need
1449 // to:
1450 // 1) Potentially treat sret as nocapture in calls
1451 // 2) Specially handle the return object and put it on the stack
1452 // 3) Utilize lifetime.end intrinsic
1453 if (PI.isEscaped())
1454 return true;
1455
1456 for (auto *U1 : Users)
1457 for (auto *U2 : Users)
1458 if (Checker.isDefinitionAcrossSuspend(*U1, U2))
1459 return true;
1460
1461 return false;
1462 }
1463
1464 void handleMayWrite(const Instruction &I) {
1465 if (!DT.dominates(&CoroBegin, &I))
1466 MayWriteBeforeCoroBegin = true;
1467 }
1468
1469 bool usedAfterCoroBegin(Instruction &I) {
1470 for (auto &U : I.uses())
1471 if (DT.dominates(&CoroBegin, U))
1472 return true;
1473 return false;
1474 }
1475
1476 void handleAlias(Instruction &I) {
1477 // We track all aliases created prior to CoroBegin but used after.
1478 // These aliases may need to be recreated after CoroBegin if the alloca
1479 // need to live on the frame.
1480 if (DT.dominates(&CoroBegin, &I) || !usedAfterCoroBegin(I))
1481 return;
1482
1483 if (!IsOffsetKnown) {
1484 AliasOffetMap[&I].reset();
1485 } else {
1486 auto Itr = AliasOffetMap.find(&I);
1487 if (Itr == AliasOffetMap.end()) {
1488 AliasOffetMap[&I] = Offset;
1489 } else if (Itr->second && *Itr->second != Offset) {
1490 // If we have seen two different possible values for this alias, we set
1491 // it to empty.
1492 AliasOffetMap[&I].reset();
1493 }
1494 }
1495 }
1496 };
1497 } // namespace
1498
1499 // We need to make room to insert a spill after the initial PHIs, but before
1500 // the catchswitch instruction. Placing the spill before the catchswitch would
1501 // violate the requirement that a catchswitch, like all other EH pads, must be the first non-PHI in its block.
1502 //
1503 // Split away catchswitch into a separate block and insert in its place:
1504 //
1505 // cleanuppad <InsertPt> cleanupret.
1506 //
1507 // cleanupret instruction will act as an insert point for the spill.
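// For illustration only (block and value names invented), a block such as
//
//   handler:
//     %val = phi i32 [ 0, %pred ]
//     %cs = catchswitch within none [label %catch] unwind to caller
//
// becomes
//
//   handler:
//     %val = phi i32 [ 0, %pred ]
//     %pad = cleanuppad within none []
//     ; <- spills are inserted here, before the cleanupret
//     cleanupret from %pad unwind label %handler.split
//
//   handler.split:
//     %cs = catchswitch within none [label %catch] unwind to caller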
1508 static Instruction *splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch) {
1509 BasicBlock *CurrentBlock = CatchSwitch->getParent();
1510 BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch);
1511 CurrentBlock->getTerminator()->eraseFromParent();
1512
1513 auto *CleanupPad =
1514 CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock);
1515 auto *CleanupRet =
1516 CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock);
1517 return CleanupRet;
1518 }
1519
1520 static void createFramePtr(coro::Shape &Shape) {
1521 auto *CB = Shape.CoroBegin;
1522 IRBuilder<> Builder(CB->getNextNode());
1523 StructType *FrameTy = Shape.FrameTy;
1524 PointerType *FramePtrTy = FrameTy->getPointerTo();
1525 Shape.FramePtr =
1526 cast<Instruction>(Builder.CreateBitCast(CB, FramePtrTy, "FramePtr"));
1527 }
1528
1529 // Replace all alloca and SSA values that are accessed across suspend points
1530 // with GetElementPointer from coroutine frame + loads and stores. Create an
1531 // AllocaSpillBB that will become the new entry block for the resume parts of
1532 // the coroutine:
1533 //
1534 // %hdl = coro.begin(...)
1535 // whatever
1536 //
1537 // becomes:
1538 //
1539 // %hdl = coro.begin(...)
1540 // %FramePtr = bitcast i8* hdl to %f.frame*
1541 // br label %AllocaSpillBB
1542 //
1543 // AllocaSpillBB:
1544 // ; geps corresponding to allocas that were moved to coroutine frame
1545 // br label PostSpill
1546 //
1547 // PostSpill:
1548 // whatever
1549 //
1550 //
1551 static void insertSpills(const FrameDataInfo &FrameData, coro::Shape &Shape) {
1552 auto *CB = Shape.CoroBegin;
1553 LLVMContext &C = CB->getContext();
1554 IRBuilder<> Builder(C);
1555 StructType *FrameTy = Shape.FrameTy;
1556 Value *FramePtr = Shape.FramePtr;
1557 DominatorTree DT(*CB->getFunction());
1558 SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> DbgPtrAllocaCache;
1559
1560 // Create a GEP with the given index into the coroutine frame for the original
1561 // value Orig. Appends an extra 0 index for array-allocas, preserving the
1562 // original type.
1563 auto GetFramePointer = [&](Value *Orig) -> Value * {
1564 FieldIDType Index = FrameData.getFieldIndex(Orig);
1565 SmallVector<Value *, 3> Indices = {
1566 ConstantInt::get(Type::getInt32Ty(C), 0),
1567 ConstantInt::get(Type::getInt32Ty(C), Index),
1568 };
1569
1570 if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
1571 if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
1572 auto Count = CI->getValue().getZExtValue();
1573 if (Count > 1) {
1574 Indices.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
1575 }
1576 } else {
1577 report_fatal_error("Coroutines cannot handle non static allocas yet");
1578 }
1579 }
1580
1581 auto GEP = cast<GetElementPtrInst>(
1582 Builder.CreateInBoundsGEP(FrameTy, FramePtr, Indices));
1583 if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
1584 if (FrameData.getDynamicAlign(Orig) != 0) {
1585 assert(FrameData.getDynamicAlign(Orig) == AI->getAlign().value());
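// Round the raw field address up to the alloca's alignment with the usual
// (Addr + Align - 1) & ~(Align - 1) idiom. As a made-up example, with an
// alignment of 16 and a field address of 0x1008, this computes
// (0x1008 + 15) & ~15 == 0x1010.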
1586 auto *M = AI->getModule();
1587 auto *IntPtrTy = M->getDataLayout().getIntPtrType(AI->getType());
1588 auto *PtrValue = Builder.CreatePtrToInt(GEP, IntPtrTy);
1589 auto *AlignMask =
1590 ConstantInt::get(IntPtrTy, AI->getAlign().value() - 1);
1591 PtrValue = Builder.CreateAdd(PtrValue, AlignMask);
1592 PtrValue = Builder.CreateAnd(PtrValue, Builder.CreateNot(AlignMask));
1593 return Builder.CreateIntToPtr(PtrValue, AI->getType());
1594 }
1595 // If the type of the GEP is not equal to the type of the AllocaInst, it
1596 // implies that the frame slot may be shared with another AllocaInst. So
1597 // we cast the GEP to the type of the AllocaInst here to reuse the frame
1598 // storage.
1599 //
1600 // Note: If we change the strategy dealing with alignment, we need to refine
1601 // this casting.
1602 if (GEP->getResultElementType() != Orig->getType())
1603 return Builder.CreateBitCast(GEP, Orig->getType(),
1604 Orig->getName() + Twine(".cast"));
1605 }
1606 return GEP;
1607 };
1608
1609 for (auto const &E : FrameData.Spills) {
1610 Value *Def = E.first;
1611 auto SpillAlignment = Align(FrameData.getAlign(Def));
1612 // Create a store instruction storing the value into the
1613 // coroutine frame.
1614 Instruction *InsertPt = nullptr;
1615 Type *ByValTy = nullptr;
1616 if (auto *Arg = dyn_cast<Argument>(Def)) {
1617 // For arguments, we will place the store instruction right after
1618 // the coroutine frame pointer instruction, i.e. bitcast of
1619 // coro.begin from i8* to %f.frame*.
1620 InsertPt = Shape.getInsertPtAfterFramePtr();
1621
1622 // If we're spilling an Argument, make sure we clear 'nocapture'
1623 // from the coroutine function.
1624 Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::NoCapture);
1625
1626 if (Arg->hasByValAttr())
1627 ByValTy = Arg->getParamByValType();
1628 } else if (auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) {
1629 // Don't spill immediately after a suspend; splitting assumes
1630 // that the suspend will be followed by a branch.
1631 InsertPt = CSI->getParent()->getSingleSuccessor()->getFirstNonPHI();
1632 } else {
1633 auto *I = cast<Instruction>(Def);
1634 if (!DT.dominates(CB, I)) {
1635 // If it is not dominated by CoroBegin, then the spill should be
1636 // inserted immediately after CoroFrame is computed.
1637 InsertPt = Shape.getInsertPtAfterFramePtr();
1638 } else if (auto *II = dyn_cast<InvokeInst>(I)) {
1639 // If we are spilling the result of the invoke instruction, split
1640 // the normal edge and insert the spill in the new block.
1641 auto *NewBB = SplitEdge(II->getParent(), II->getNormalDest());
1642 InsertPt = NewBB->getTerminator();
1643 } else if (isa<PHINode>(I)) {
1644 // Skip PHINodes and EH pad instructions.
1645 BasicBlock *DefBlock = I->getParent();
1646 if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
1647 InsertPt = splitBeforeCatchSwitch(CSI);
1648 else
1649 InsertPt = &*DefBlock->getFirstInsertionPt();
1650 } else {
1651 assert(!I->isTerminator() && "unexpected terminator");
1652 // For all other values, the spill is placed immediately after
1653 // the definition.
1654 InsertPt = I->getNextNode();
1655 }
1656 }
1657
1658 auto Index = FrameData.getFieldIndex(Def);
1659 Builder.SetInsertPoint(InsertPt);
1660 auto *G = Builder.CreateConstInBoundsGEP2_32(
1661 FrameTy, FramePtr, 0, Index, Def->getName() + Twine(".spill.addr"));
1662 if (ByValTy) {
1663 // For byval arguments, we need to store the pointed value in the frame,
1664 // instead of the pointer itself.
1665 auto *Value = Builder.CreateLoad(ByValTy, Def);
1666 Builder.CreateAlignedStore(Value, G, SpillAlignment);
1667 } else {
1668 Builder.CreateAlignedStore(Def, G, SpillAlignment);
1669 }
1670
1671 BasicBlock *CurrentBlock = nullptr;
1672 Value *CurrentReload = nullptr;
1673 for (auto *U : E.second) {
1674 // If we have not seen the use block, create a load instruction to reload
1675 // the spilled value from the coroutine frame. Populates the Value pointer
1676 // reference provided with the frame GEP.
1677 if (CurrentBlock != U->getParent()) {
1678 CurrentBlock = U->getParent();
1679 Builder.SetInsertPoint(&*CurrentBlock->getFirstInsertionPt());
1680
1681 auto *GEP = GetFramePointer(E.first);
1682 GEP->setName(E.first->getName() + Twine(".reload.addr"));
1683 if (ByValTy)
1684 CurrentReload = GEP;
1685 else
1686 CurrentReload = Builder.CreateAlignedLoad(
1687 FrameTy->getElementType(FrameData.getFieldIndex(E.first)), GEP,
1688 SpillAlignment, E.first->getName() + Twine(".reload"));
1689
1690 TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(Def);
1691 for (DbgDeclareInst *DDI : DIs) {
1692 bool AllowUnresolved = false;
1693 // This dbg.declare is preserved for all coro-split function
1694 // fragments. It will be unreachable in the main function, and
1695 // processed by coro::salvageDebugInfo() by CoroCloner.
1696 DIBuilder(*CurrentBlock->getParent()->getParent(), AllowUnresolved)
1697 .insertDeclare(CurrentReload, DDI->getVariable(),
1698 DDI->getExpression(), DDI->getDebugLoc(),
1699 &*Builder.GetInsertPoint());
1700 // This dbg.declare is for the main function entry point. It
1701 // will be deleted in all coro-split functions.
1702 coro::salvageDebugInfo(DbgPtrAllocaCache, DDI, Shape.OptimizeFrame);
1703 }
1704 }
1705
1706 // Salvage debug info on any dbg.addr that we see. We do not insert them
1707 // into each block where we have a use though.
1708 if (auto *DI = dyn_cast<DbgAddrIntrinsic>(U)) {
1709 coro::salvageDebugInfo(DbgPtrAllocaCache, DI, Shape.OptimizeFrame);
1710 }
1711
1712 // If we have a single-edge PHINode, remove it and replace it with a
1713 // reload from the coroutine frame. (We already took care of multi-edge
1714 // PHINodes by rewriting them in the rewritePHIs function).
1715 if (auto *PN = dyn_cast<PHINode>(U)) {
1716 assert(PN->getNumIncomingValues() == 1 &&
1717 "unexpected number of incoming "
1718 "values in the PHINode");
1719 PN->replaceAllUsesWith(CurrentReload);
1720 PN->eraseFromParent();
1721 continue;
1722 }
1723
1724 // Replace all uses of CurrentValue in the current instruction with
1725 // reload.
1726 U->replaceUsesOfWith(Def, CurrentReload);
1727 }
1728 }
1729
1730 BasicBlock *FramePtrBB = Shape.getInsertPtAfterFramePtr()->getParent();
1731
1732 auto SpillBlock = FramePtrBB->splitBasicBlock(
1733 Shape.getInsertPtAfterFramePtr(), "AllocaSpillBB");
1734 SpillBlock->splitBasicBlock(&SpillBlock->front(), "PostSpill");
1735 Shape.AllocaSpillBlock = SpillBlock;
1736
1737 // retcon and retcon.once lowering assumes all uses have been sunk.
1738 if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
1739 Shape.ABI == coro::ABI::Async) {
1740 // If we found any allocas, replace all of their remaining uses with Geps.
1741 Builder.SetInsertPoint(&SpillBlock->front());
1742 for (const auto &P : FrameData.Allocas) {
1743 AllocaInst *Alloca = P.Alloca;
1744 auto *G = GetFramePointer(Alloca);
1745
1746 // We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
1747 // here, as we are changing the location of the instruction.
1748 G->takeName(Alloca);
1749 Alloca->replaceAllUsesWith(G);
1750 Alloca->eraseFromParent();
1751 }
1752 return;
1753 }
1754
1755 // If we found any allocas, replace all of their remaining uses with GEP
1756 // instructions. To preserve debuggability, we replace the uses of allocas in
1757 // dbg.declares and dbg.values with the reload from the frame.
1758 // Note: We cannot replace the alloca with GEP instructions indiscriminately,
1759 // as some of the uses may not be dominated by CoroBegin.
1760 Builder.SetInsertPoint(&Shape.AllocaSpillBlock->front());
1761 SmallVector<Instruction *, 4> UsersToUpdate;
1762 for (const auto &A : FrameData.Allocas) {
1763 AllocaInst *Alloca = A.Alloca;
1764 UsersToUpdate.clear();
1765 for (User *U : Alloca->users()) {
1766 auto *I = cast<Instruction>(U);
1767 if (DT.dominates(CB, I))
1768 UsersToUpdate.push_back(I);
1769 }
1770 if (UsersToUpdate.empty())
1771 continue;
1772 auto *G = GetFramePointer(Alloca);
1773 G->setName(Alloca->getName() + Twine(".reload.addr"));
1774
1775 SmallVector<DbgVariableIntrinsic *, 4> DIs;
1776 findDbgUsers(DIs, Alloca);
1777 for (auto *DVI : DIs)
1778 DVI->replaceUsesOfWith(Alloca, G);
1779
1780 for (Instruction *I : UsersToUpdate)
1781 I->replaceUsesOfWith(Alloca, G);
1782 }
1783 Builder.SetInsertPoint(Shape.getInsertPtAfterFramePtr());
1784 for (const auto &A : FrameData.Allocas) {
1785 AllocaInst *Alloca = A.Alloca;
1786 if (A.MayWriteBeforeCoroBegin) {
1787 // isEscaped really means potentially modified before CoroBegin.
1788 if (Alloca->isArrayAllocation())
1789 report_fatal_error(
1790 "Coroutines cannot handle copying of array allocas yet");
1791
1792 auto *G = GetFramePointer(Alloca);
1793 auto *Value = Builder.CreateLoad(Alloca->getAllocatedType(), Alloca);
1794 Builder.CreateStore(Value, G);
1795 }
1796 // For each alias to Alloca created before CoroBegin but used after
1797 // CoroBegin, we recreate it after CoroBegin by applying the offset
1798 // to the pointer in the frame.
1799 for (const auto &Alias : A.Aliases) {
1800 auto *FramePtr = GetFramePointer(Alloca);
1801 auto *FramePtrRaw =
1802 Builder.CreateBitCast(FramePtr, Type::getInt8PtrTy(C));
1803 auto &Value = *Alias.second;
1804 auto ITy = IntegerType::get(C, Value.getBitWidth());
1805 auto *AliasPtr = Builder.CreateGEP(Type::getInt8Ty(C), FramePtrRaw,
1806 ConstantInt::get(ITy, Value));
1807 auto *AliasPtrTyped =
1808 Builder.CreateBitCast(AliasPtr, Alias.first->getType());
1809 Alias.first->replaceUsesWithIf(
1810 AliasPtrTyped, [&](Use &U) { return DT.dominates(CB, U); });
1811 }
1812 }
1813 }
1814
1815 // Moves the values in the PHIs in SuccBB that correspond to PredBB into a new
1816 // PHI in InsertedBB.
1817 static void movePHIValuesToInsertedBlock(BasicBlock *SuccBB,
1818 BasicBlock *InsertedBB,
1819 BasicBlock *PredBB,
1820 PHINode *UntilPHI = nullptr) {
1821 auto *PN = cast<PHINode>(&SuccBB->front());
1822 do {
1823 int Index = PN->getBasicBlockIndex(InsertedBB);
1824 Value *V = PN->getIncomingValue(Index);
1825 PHINode *InputV = PHINode::Create(
1826 V->getType(), 1, V->getName() + Twine(".") + SuccBB->getName(),
1827 &InsertedBB->front());
1828 InputV->addIncoming(V, PredBB);
1829 PN->setIncomingValue(Index, InputV);
1830 PN = dyn_cast<PHINode>(PN->getNextNode());
1831 } while (PN != UntilPHI);
1832 }
1833
1834 // Rewrites the PHI Nodes in a cleanuppad.
1835 static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB,
1836 CleanupPadInst *CleanupPad) {
1837 // For every incoming edge to a CleanupPad we will create a new block holding
1838 // all incoming values in single-value PHI nodes. We will then create another
1839 // block to act as a dispatcher (as all unwind edges for related EH blocks
1840 // must be the same).
1841 //
1842 // cleanuppad:
1843 // %2 = phi i32[%0, %catchswitch], [%1, %catch.1]
1844 // %3 = cleanuppad within none []
1845 //
1846 // It will create:
1847 //
1848 // cleanuppad.corodispatch
1849 // %2 = phi i8[0, %catchswitch], [1, %catch.1]
1850 // %3 = cleanuppad within none []
1851 // switch i8 % 2, label %unreachable
1852 // [i8 0, label %cleanuppad.from.catchswitch
1853 // i8 1, label %cleanuppad.from.catch.1]
1854 // cleanuppad.from.catchswitch:
1855 // %4 = phi i32 [%0, %catchswitch]
1856 // br %label cleanuppad
1857 // cleanuppad.from.catch.1:
1858 // %6 = phi i32 [%1, %catch.1]
1859 // br %label cleanuppad
1860 // cleanuppad:
1861 // %8 = phi i32 [%4, %cleanuppad.from.catchswitch],
1862 // [%6, %cleanuppad.from.catch.1]
1863
1864 // Unreachable BB, in case the dispatcher switches on an invalid value.
1865 auto *UnreachBB = BasicBlock::Create(
1866 CleanupPadBB->getContext(), "unreachable", CleanupPadBB->getParent());
1867 IRBuilder<> Builder(UnreachBB);
1868 Builder.CreateUnreachable();
1869
1870 // Create a new cleanuppad which will be the dispatcher.
1871 auto *NewCleanupPadBB =
1872 BasicBlock::Create(CleanupPadBB->getContext(),
1873 CleanupPadBB->getName() + Twine(".corodispatch"),
1874 CleanupPadBB->getParent(), CleanupPadBB);
1875 Builder.SetInsertPoint(NewCleanupPadBB);
1876 auto *SwitchType = Builder.getInt8Ty();
1877 auto *SetDispatchValuePN =
1878 Builder.CreatePHI(SwitchType, pred_size(CleanupPadBB));
1879 CleanupPad->removeFromParent();
1880 CleanupPad->insertAfter(SetDispatchValuePN);
1881 auto *SwitchOnDispatch = Builder.CreateSwitch(SetDispatchValuePN, UnreachBB,
1882 pred_size(CleanupPadBB));
1883
1884 int SwitchIndex = 0;
1885 SmallVector<BasicBlock *, 8> Preds(predecessors(CleanupPadBB));
1886 for (BasicBlock *Pred : Preds) {
1887 // Create a new cleanuppad and move the PHI values to there.
1888 auto *CaseBB = BasicBlock::Create(CleanupPadBB->getContext(),
1889 CleanupPadBB->getName() +
1890 Twine(".from.") + Pred->getName(),
1891 CleanupPadBB->getParent(), CleanupPadBB);
1892 updatePhiNodes(CleanupPadBB, Pred, CaseBB);
1893 CaseBB->setName(CleanupPadBB->getName() + Twine(".from.") +
1894 Pred->getName());
1895 Builder.SetInsertPoint(CaseBB);
1896 Builder.CreateBr(CleanupPadBB);
1897 movePHIValuesToInsertedBlock(CleanupPadBB, CaseBB, NewCleanupPadBB);
1898
1899 // Update this Pred to the new unwind point.
1900 setUnwindEdgeTo(Pred->getTerminator(), NewCleanupPadBB);
1901
1902 // Setup the switch in the dispatcher.
1903 auto *SwitchConstant = ConstantInt::get(SwitchType, SwitchIndex);
1904 SetDispatchValuePN->addIncoming(SwitchConstant, Pred);
1905 SwitchOnDispatch->addCase(SwitchConstant, CaseBB);
1906 SwitchIndex++;
1907 }
1908 }
1909
1910 static void cleanupSinglePredPHIs(Function &F) {
1911 SmallVector<PHINode *, 32> Worklist;
1912 for (auto &BB : F) {
1913 for (auto &Phi : BB.phis()) {
1914 if (Phi.getNumIncomingValues() == 1) {
1915 Worklist.push_back(&Phi);
1916 } else
1917 break;
1918 }
1919 }
1920 while (!Worklist.empty()) {
1921 auto *Phi = Worklist.pop_back_val();
1922 auto *OriginalValue = Phi->getIncomingValue(0);
1923 Phi->replaceAllUsesWith(OriginalValue);
1924 }
1925 }
1926
1927 static void rewritePHIs(BasicBlock &BB) {
1928 // For every incoming edge we will create a block holding all
1929 // incoming values in single-value PHI nodes.
1930 //
1931 // loop:
1932 // %n.val = phi i32[%n, %entry], [%inc, %loop]
1933 //
1934 // It will create:
1935 //
1936 // loop.from.entry:
1937 // %n.loop.pre = phi i32 [%n, %entry]
1938 // br %label loop
1939 // loop.from.loop:
1940 // %inc.loop.pre = phi i32 [%inc, %loop]
1941 // br %label loop
1942 //
1943 // After this rewrite, further analysis will ignore any phi nodes with more
1944 // than one incoming edge.
1945
1946 // TODO: Simplify PHINodes in the basic block to remove duplicate
1947 // predecessors.
1948
1949 // Special case for CleanupPad: all EH blocks must have the same unwind edge
1950 // so we need to create an additional "dispatcher" block.
1951 if (auto *CleanupPad =
1952 dyn_cast_or_null<CleanupPadInst>(BB.getFirstNonPHI())) {
1953 SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
1954 for (BasicBlock *Pred : Preds) {
1955 if (CatchSwitchInst *CS =
1956 dyn_cast<CatchSwitchInst>(Pred->getTerminator())) {
1957 // CleanupPad with a CatchSwitch predecessor: therefore this is an
1958 // unwind destination that needs to be handled specially.
1959 assert(CS->getUnwindDest() == &BB);
1960 (void)CS;
1961 rewritePHIsForCleanupPad(&BB, CleanupPad);
1962 return;
1963 }
1964 }
1965 }
1966
1967 LandingPadInst *LandingPad = nullptr;
1968 PHINode *ReplPHI = nullptr;
1969 if ((LandingPad = dyn_cast_or_null<LandingPadInst>(BB.getFirstNonPHI()))) {
1970 // ehAwareSplitEdge will clone the LandingPad in all the edge blocks.
1971 // We replace the original landing pad with a PHINode that will collect the
1972 // results from all of them.
1973 ReplPHI = PHINode::Create(LandingPad->getType(), 1, "", LandingPad);
1974 ReplPHI->takeName(LandingPad);
1975 LandingPad->replaceAllUsesWith(ReplPHI);
1976 // We will erase the original landing pad at the end of this function after
1977 // ehAwareSplitEdge cloned it in the transition blocks.
1978 }
1979
1980 SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
1981 for (BasicBlock *Pred : Preds) {
1982 auto *IncomingBB = ehAwareSplitEdge(Pred, &BB, LandingPad, ReplPHI);
1983 IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());
1984
1985 // Stop the moving of values at ReplPHI, as this is either null or the PHI
1986 // that replaced the landing pad.
1987 movePHIValuesToInsertedBlock(&BB, IncomingBB, Pred, ReplPHI);
1988 }
1989
1990 if (LandingPad) {
1991 // Calls to ehAwareSplitEdge cloned the original landing pad; it is
1992 // no longer needed.
1993 LandingPad->eraseFromParent();
1994 }
1995 }
1996
1997 static void rewritePHIs(Function &F) {
1998 SmallVector<BasicBlock *, 8> WorkList;
1999
2000 for (BasicBlock &BB : F)
2001 if (auto *PN = dyn_cast<PHINode>(&BB.front()))
2002 if (PN->getNumIncomingValues() > 1)
2003 WorkList.push_back(&BB);
2004
2005 for (BasicBlock *BB : WorkList)
2006 rewritePHIs(*BB);
2007 }
2008
2009 // Check for instructions that we can recreate on resume as opposed to spilling
2010 // the result into the coroutine frame.
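// As a hypothetical example,
//
//   %addr = getelementptr i32, i32* %base, i64 1
//   %sp = call i8 @llvm.coro.suspend(token %save, i1 false)
//   %v = load i32, i32* %addr
//
// does not need a frame slot for %addr: the GEP is cheap to recompute after
// the suspend, provided %base is still available there.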
2011 static bool materializable(Instruction &V) {
2012 return isa<CastInst>(&V) || isa<GetElementPtrInst>(&V) ||
2013 isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<SelectInst>(&V);
2014 }
2015
2016 // Check for structural coroutine intrinsics that should not be spilled into
2017 // the coroutine frame.
2018 static bool isCoroutineStructureIntrinsic(Instruction &I) {
2019 return isa<CoroIdInst>(&I) || isa<CoroSaveInst>(&I) ||
2020 isa<CoroSuspendInst>(&I);
2021 }
2022
2023 // For every use of the value that crosses a suspend point, recreate that value
2024 // after the suspend point.
2025 static void rewriteMaterializableInstructions(IRBuilder<> &IRB,
2026 const SpillInfo &Spills) {
2027 for (const auto &E : Spills) {
2028 Value *Def = E.first;
2029 BasicBlock *CurrentBlock = nullptr;
2030 Instruction *CurrentMaterialization = nullptr;
2031 for (Instruction *U : E.second) {
2032 // If we have not seen this block, materialize the value.
2033 if (CurrentBlock != U->getParent()) {
2034
2035 bool IsInCoroSuspendBlock = isa<AnyCoroSuspendInst>(U);
2036 CurrentBlock = U->getParent();
2037 auto *InsertBlock = IsInCoroSuspendBlock
2038 ? CurrentBlock->getSinglePredecessor()
2039 : CurrentBlock;
2040 CurrentMaterialization = cast<Instruction>(Def)->clone();
2041 CurrentMaterialization->setName(Def->getName());
2042 CurrentMaterialization->insertBefore(
2043 IsInCoroSuspendBlock ? InsertBlock->getTerminator()
2044 : &*InsertBlock->getFirstInsertionPt());
2045 }
2046 if (auto *PN = dyn_cast<PHINode>(U)) {
2047 assert(PN->getNumIncomingValues() == 1 &&
2048 "unexpected number of incoming "
2049 "values in the PHINode");
2050 PN->replaceAllUsesWith(CurrentMaterialization);
2051 PN->eraseFromParent();
2052 continue;
2053 }
2054 // Replace all uses of Def in the current instruction with the
2055 // CurrentMaterialization for the block.
2056 U->replaceUsesOfWith(Def, CurrentMaterialization);
2057 }
2058 }
2059 }
2060
2061 // Splits the block at a particular instruction unless it is the first
2062 // instruction in the block with a single predecessor.
2063 static BasicBlock *splitBlockIfNotFirst(Instruction *I, const Twine &Name) {
2064 auto *BB = I->getParent();
2065 if (&BB->front() == I) {
2066 if (BB->getSinglePredecessor()) {
2067 BB->setName(Name);
2068 return BB;
2069 }
2070 }
2071 return BB->splitBasicBlock(I, Name);
2072 }
2073
2074 // Split above and below a particular instruction so that it
2075 // ends up alone in its own block.
2076 static void splitAround(Instruction *I, const Twine &Name) {
2077 splitBlockIfNotFirst(I, Name);
2078 splitBlockIfNotFirst(I->getNextNode(), "After" + Name);
2079 }
2080
2081 static bool isSuspendBlock(BasicBlock *BB) {
2082 return isa<AnyCoroSuspendInst>(BB->front());
2083 }
2084
2085 typedef SmallPtrSet<BasicBlock*, 8> VisitedBlocksSet;
2086
2087 /// Does control flow starting at the given block ever reach a suspend
2088 /// instruction before reaching a block in VisitedOrFreeBBs?
2089 static bool isSuspendReachableFrom(BasicBlock *From,
2090 VisitedBlocksSet &VisitedOrFreeBBs) {
2091 // Eagerly try to add this block to the visited set. If it's already
2092 // there, stop recursing; this path doesn't reach a suspend before
2093 // either looping or reaching a freeing block.
2094 if (!VisitedOrFreeBBs.insert(From).second)
2095 return false;
2096
2097 // We assume that we'll already have split suspends into their own blocks.
2098 if (isSuspendBlock(From))
2099 return true;
2100
2101 // Recurse on the successors.
2102 for (auto Succ : successors(From)) {
2103 if (isSuspendReachableFrom(Succ, VisitedOrFreeBBs))
2104 return true;
2105 }
2106
2107 return false;
2108 }
2109
2110 /// Is the given alloca "local", i.e. bounded in lifetime to not cross a
2111 /// suspend point?
2112 static bool isLocalAlloca(CoroAllocaAllocInst *AI) {
2113 // Seed the visited set with all the basic blocks containing a free
2114 // so that we won't pass them up.
2115 VisitedBlocksSet VisitedOrFreeBBs;
2116 for (auto User : AI->users()) {
2117 if (auto FI = dyn_cast<CoroAllocaFreeInst>(User))
2118 VisitedOrFreeBBs.insert(FI->getParent());
2119 }
2120
2121 return !isSuspendReachableFrom(AI->getParent(), VisitedOrFreeBBs);
2122 }
2123
2124 /// After we split the coroutine, will the given basic block be along
2125 /// an obvious exit path for the resumption function?
2126 static bool willLeaveFunctionImmediatelyAfter(BasicBlock *BB,
2127 unsigned depth = 3) {
2128 // If we've bottomed out our depth count, stop searching and assume
2129 // that the path might loop back.
2130 if (depth == 0) return false;
2131
2132 // If this is a suspend block, we're about to exit the resumption function.
2133 if (isSuspendBlock(BB)) return true;
2134
2135 // Recurse into the successors.
2136 for (auto Succ : successors(BB)) {
2137 if (!willLeaveFunctionImmediatelyAfter(Succ, depth - 1))
2138 return false;
2139 }
2140
2141 // If none of the successors leads back in a loop, we're on an exit/abort.
2142 return true;
2143 }
2144
2145 static bool localAllocaNeedsStackSave(CoroAllocaAllocInst *AI) {
2146 // Look for a free that isn't sufficiently obviously followed by
2147 // either a suspend or a termination, i.e. something that will leave
2148 // the coro resumption frame.
2149 for (auto U : AI->users()) {
2150 auto FI = dyn_cast<CoroAllocaFreeInst>(U);
2151 if (!FI) continue;
2152
2153 if (!willLeaveFunctionImmediatelyAfter(FI->getParent()))
2154 return true;
2155 }
2156
2157 // If we never found one, we don't need a stack save.
2158 return false;
2159 }
2160
2161 /// Turn each of the given local allocas into a normal (dynamic) alloca
2162 /// instruction.
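/// As a rough sketch (intrinsic signatures omitted): the coro.alloca.alloc is
/// replaced by a plain alloca of the requested size and alignment, every
/// coro.alloca.get of it is replaced by that alloca, and each
/// coro.alloca.free becomes a stackrestore paired with a stacksave emitted
/// before the alloca. The save/restore pair is only emitted when
/// localAllocaNeedsStackSave() says the free is not obviously followed by a
/// suspend or a function exit.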
2163 static void lowerLocalAllocas(ArrayRef<CoroAllocaAllocInst*> LocalAllocas,
2164 SmallVectorImpl<Instruction*> &DeadInsts) {
2165 for (auto AI : LocalAllocas) {
2166 auto M = AI->getModule();
2167 IRBuilder<> Builder(AI);
2168
2169 // Save the stack depth. Try to avoid doing this if the stackrestore
2170 // is going to immediately precede a return or something.
2171 Value *StackSave = nullptr;
2172 if (localAllocaNeedsStackSave(AI))
2173 StackSave = Builder.CreateCall(
2174 Intrinsic::getDeclaration(M, Intrinsic::stacksave));
2175
2176 // Allocate memory.
2177 auto Alloca = Builder.CreateAlloca(Builder.getInt8Ty(), AI->getSize());
2178 Alloca->setAlignment(AI->getAlignment());
2179
2180 for (auto U : AI->users()) {
2181 // Replace gets with the allocation.
2182 if (isa<CoroAllocaGetInst>(U)) {
2183 U->replaceAllUsesWith(Alloca);
2184
2185 // Replace frees with stackrestores. This is safe because
2186 // alloca.alloc is required to obey a stack discipline, although we
2187 // don't enforce that structurally.
2188 } else {
2189 auto FI = cast<CoroAllocaFreeInst>(U);
2190 if (StackSave) {
2191 Builder.SetInsertPoint(FI);
2192 Builder.CreateCall(
2193 Intrinsic::getDeclaration(M, Intrinsic::stackrestore),
2194 StackSave);
2195 }
2196 }
2197 DeadInsts.push_back(cast<Instruction>(U));
2198 }
2199
2200 DeadInsts.push_back(AI);
2201 }
2202 }
2203
2204 /// Turn the given coro.alloca.alloc call into a dynamic allocation.
2205 /// This happens during the all-instructions iteration, so it must not
2206 /// delete the call.
2207 static Instruction *lowerNonLocalAlloca(CoroAllocaAllocInst *AI,
2208 coro::Shape &Shape,
2209 SmallVectorImpl<Instruction*> &DeadInsts) {
2210 IRBuilder<> Builder(AI);
2211 auto Alloc = Shape.emitAlloc(Builder, AI->getSize(), nullptr);
2212
2213 for (User *U : AI->users()) {
2214 if (isa<CoroAllocaGetInst>(U)) {
2215 U->replaceAllUsesWith(Alloc);
2216 } else {
2217 auto FI = cast<CoroAllocaFreeInst>(U);
2218 Builder.SetInsertPoint(FI);
2219 Shape.emitDealloc(Builder, Alloc, nullptr);
2220 }
2221 DeadInsts.push_back(cast<Instruction>(U));
2222 }
2223
2224 // Push this on last so that it gets deleted after all the others.
2225 DeadInsts.push_back(AI);
2226
2227 // Return the new allocation value so that we can check for needed spills.
2228 return cast<Instruction>(Alloc);
2229 }
2230
2231 /// Get the current swifterror value.
2232 static Value *emitGetSwiftErrorValue(IRBuilder<> &Builder, Type *ValueTy,
2233 coro::Shape &Shape) {
2234 // Make a fake function pointer as a sort of intrinsic.
2235 auto FnTy = FunctionType::get(ValueTy, {}, false);
2236 auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());
2237
2238 auto Call = Builder.CreateCall(FnTy, Fn, {});
2239 Shape.SwiftErrorOps.push_back(Call);
2240
2241 return Call;
2242 }
2243
2244 /// Set the given value as the current swifterror value.
2245 ///
2246 /// Returns a slot that can be used as a swifterror slot.
2247 static Value *emitSetSwiftErrorValue(IRBuilder<> &Builder, Value *V,
2248 coro::Shape &Shape) {
2249 // Make a fake function pointer as a sort of intrinsic.
2250 auto FnTy = FunctionType::get(V->getType()->getPointerTo(),
2251 {V->getType()}, false);
2252 auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());
2253
2254 auto Call = Builder.CreateCall(FnTy, Fn, { V });
2255 Shape.SwiftErrorOps.push_back(Call);
2256
2257 return Call;
2258 }
2259
2260 /// Set the swifterror value from the given alloca before a call,
2261 /// then put it back in the alloca afterwards.
2262 ///
2263 /// Returns an address that will stand in for the swifterror slot
2264 /// until splitting.
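///
/// Roughly: load the current value from the alloca, feed it to the "set"
/// placeholder (whose result stands in for the swifterror slot and becomes
/// the call argument), let the call run, then read the value back through the
/// "get" placeholder on the normal path and store it into the alloca.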
2265 static Value *emitSetAndGetSwiftErrorValueAround(Instruction *Call,
2266 AllocaInst *Alloca,
2267 coro::Shape &Shape) {
2268 auto ValueTy = Alloca->getAllocatedType();
2269 IRBuilder<> Builder(Call);
2270
2271 // Load the current value from the alloca and set it as the
2272 // swifterror value.
2273 auto ValueBeforeCall = Builder.CreateLoad(ValueTy, Alloca);
2274 auto Addr = emitSetSwiftErrorValue(Builder, ValueBeforeCall, Shape);
2275
2276 // Move to after the call. Since swifterror only has a guaranteed
2277 // value on normal exits, we can ignore implicit and explicit unwind
2278 // edges.
2279 if (isa<CallInst>(Call)) {
2280 Builder.SetInsertPoint(Call->getNextNode());
2281 } else {
2282 auto Invoke = cast<InvokeInst>(Call);
2283 Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstNonPHIOrDbg());
2284 }
2285
2286 // Get the current swifterror value and store it to the alloca.
2287 auto ValueAfterCall = emitGetSwiftErrorValue(Builder, ValueTy, Shape);
2288 Builder.CreateStore(ValueAfterCall, Alloca);
2289
2290 return Addr;
2291 }
2292
2293 /// Eliminate a formerly-swifterror alloca by inserting the get/set
2294 /// intrinsics and attempting to MemToReg the alloca away.
2295 static void eliminateSwiftErrorAlloca(Function &F, AllocaInst *Alloca,
2296 coro::Shape &Shape) {
2297 for (Use &Use : llvm::make_early_inc_range(Alloca->uses())) {
2298 // swifterror values can only be used in very specific ways.
2299 // We take advantage of that here.
2300 auto User = Use.getUser();
2301 if (isa<LoadInst>(User) || isa<StoreInst>(User))
2302 continue;
2303
2304 assert(isa<CallInst>(User) || isa<InvokeInst>(User));
2305 auto Call = cast<Instruction>(User);
2306
2307 auto Addr = emitSetAndGetSwiftErrorValueAround(Call, Alloca, Shape);
2308
2309 // Use the returned slot address as the call argument.
2310 Use.set(Addr);
2311 }
2312
2313 // All the uses should be loads and stores now.
2314 assert(isAllocaPromotable(Alloca));
2315 }
2316
2317 /// "Eliminate" a swifterror argument by reducing it to the alloca case
2318 /// and then loading and storing in the prologue and epilog.
2319 ///
2320 /// The argument keeps the swifterror flag.
2321 static void eliminateSwiftErrorArgument(Function &F, Argument &Arg,
2322 coro::Shape &Shape,
2323 SmallVectorImpl<AllocaInst*> &AllocasToPromote) {
2324 IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());
2325
2326 auto ArgTy = cast<PointerType>(Arg.getType());
2327 // swifterror arguments are required to have pointer-to-pointer type,
2328 // so create a pointer-typed alloca with opaque pointers.
2329 auto ValueTy = ArgTy->isOpaque() ? PointerType::getUnqual(F.getContext())
2330 : ArgTy->getNonOpaquePointerElementType();
2331
2332 // Reduce to the alloca case:
2333
2334 // Create an alloca and replace all uses of the arg with it.
2335 auto Alloca = Builder.CreateAlloca(ValueTy, ArgTy->getAddressSpace());
2336 Arg.replaceAllUsesWith(Alloca);
2337
2338 // Set an initial value in the alloca. swifterror is always null on entry.
2339 auto InitialValue = Constant::getNullValue(ValueTy);
2340 Builder.CreateStore(InitialValue, Alloca);
2341
2342 // Find all the suspends in the function and save and restore around them.
2343 for (auto Suspend : Shape.CoroSuspends) {
2344 (void) emitSetAndGetSwiftErrorValueAround(Suspend, Alloca, Shape);
2345 }
2346
2347 // Find all the coro.ends in the function and restore the error value.
2348 for (auto End : Shape.CoroEnds) {
2349 Builder.SetInsertPoint(End);
2350 auto FinalValue = Builder.CreateLoad(ValueTy, Alloca);
2351 (void) emitSetSwiftErrorValue(Builder, FinalValue, Shape);
2352 }
2353
2354 // Now we can use the alloca logic.
2355 AllocasToPromote.push_back(Alloca);
2356 eliminateSwiftErrorAlloca(F, Alloca, Shape);
2357 }
2358
2359 /// Eliminate all problematic uses of swifterror arguments and allocas
2360 /// from the function. We'll fix them up later when splitting the function.
2361 static void eliminateSwiftError(Function &F, coro::Shape &Shape) {
2362 SmallVector<AllocaInst*, 4> AllocasToPromote;
2363
2364 // Look for a swifterror argument.
2365 for (auto &Arg : F.args()) {
2366 if (!Arg.hasSwiftErrorAttr()) continue;
2367
2368 eliminateSwiftErrorArgument(F, Arg, Shape, AllocasToPromote);
2369 break;
2370 }
2371
2372 // Look for swifterror allocas.
2373 for (auto &Inst : F.getEntryBlock()) {
2374 auto Alloca = dyn_cast<AllocaInst>(&Inst);
2375 if (!Alloca || !Alloca->isSwiftError()) continue;
2376
2377 // Clear the swifterror flag.
2378 Alloca->setSwiftError(false);
2379
2380 AllocasToPromote.push_back(Alloca);
2381 eliminateSwiftErrorAlloca(F, Alloca, Shape);
2382 }
2383
2384 // If we have any allocas to promote, compute a dominator tree and
2385 // promote them en masse.
2386 if (!AllocasToPromote.empty()) {
2387 DominatorTree DT(F);
2388 PromoteMemToReg(AllocasToPromote, DT);
2389 }
2390 }
2391
2392 /// retcon and retcon.once conventions assume that all spill uses can be sunk
2393 /// after the coro.begin intrinsic.
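/// For example (illustrative only), a bitcast of a spilled value that was
/// emitted in coro.begin's block above the coro.begin, together with any of
/// its own users that also sit above coro.begin, gets moved to just after
/// coro.begin so the later frame rewrite can address it through the frame.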
2394 static void sinkSpillUsesAfterCoroBegin(Function &F,
2395 const FrameDataInfo &FrameData,
2396 CoroBeginInst *CoroBegin) {
2397 DominatorTree Dom(F);
2398
2399 SmallSetVector<Instruction *, 32> ToMove;
2400 SmallVector<Instruction *, 32> Worklist;
2401
2402 // Collect all users that precede coro.begin.
2403 for (auto *Def : FrameData.getAllDefs()) {
2404 for (User *U : Def->users()) {
2405 auto Inst = cast<Instruction>(U);
2406 if (Inst->getParent() != CoroBegin->getParent() ||
2407 Dom.dominates(CoroBegin, Inst))
2408 continue;
2409 if (ToMove.insert(Inst))
2410 Worklist.push_back(Inst);
2411 }
2412 }
2413 // Recursively collect users before coro.begin.
2414 while (!Worklist.empty()) {
2415 auto *Def = Worklist.pop_back_val();
2416 for (User *U : Def->users()) {
2417 auto Inst = cast<Instruction>(U);
2418 if (Dom.dominates(CoroBegin, Inst))
2419 continue;
2420 if (ToMove.insert(Inst))
2421 Worklist.push_back(Inst);
2422 }
2423 }
2424
2425 // Sort by dominance.
2426 SmallVector<Instruction *, 64> InsertionList(ToMove.begin(), ToMove.end());
2427 llvm::sort(InsertionList, [&Dom](Instruction *A, Instruction *B) -> bool {
2428 // If A dominates B, it should precede (<) B.
2429 return Dom.dominates(A, B);
2430 });
2431
2432 Instruction *InsertPt = CoroBegin->getNextNode();
2433 for (Instruction *Inst : InsertionList)
2434 Inst->moveBefore(InsertPt);
2435 }
2436
2437 /// For each local variable whose users all live inside a single suspended
2438 /// region, sink its lifetime.start markers to the block right after the
2439 /// suspend block. Doing so minimizes the lifetime of each variable, hence
2440 /// minimizing the amount of data we end up putting on the frame.
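/// As an illustrative sketch (names invented): if %x is only used on the
/// resume path, a marker emitted in the entry block, e.g.
///
///   entry:
///     %x = alloca i32
///     %x.cast = bitcast i32* %x to i8*
///     call void @llvm.lifetime.start.p0i8(i64 4, i8* %x.cast)
///     ...
///   await.ready:                  ; the block right after the suspend
///     store i32 1, i32* %x
///
/// can have its lifetime.start sunk into %await.ready, so that %x's lifetime
/// no longer crosses the suspend point and %x can stay off the frame.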
2441 static void sinkLifetimeStartMarkers(Function &F, coro::Shape &Shape,
2442 SuspendCrossingInfo &Checker) {
2443 DominatorTree DT(F);
2444
2445 // Collect all possible basic blocks which may dominate all uses of allocas.
2446 SmallPtrSet<BasicBlock *, 4> DomSet;
2447 DomSet.insert(&F.getEntryBlock());
2448 for (auto *CSI : Shape.CoroSuspends) {
2449 BasicBlock *SuspendBlock = CSI->getParent();
2450 assert(isSuspendBlock(SuspendBlock) && SuspendBlock->getSingleSuccessor() &&
2451 "should have split coro.suspend into its own block");
2452 DomSet.insert(SuspendBlock->getSingleSuccessor());
2453 }
2454
2455 for (Instruction &I : instructions(F)) {
2456 AllocaInst* AI = dyn_cast<AllocaInst>(&I);
2457 if (!AI)
2458 continue;
2459
2460 for (BasicBlock *DomBB : DomSet) {
2461 bool Valid = true;
2462 SmallVector<Instruction *, 1> Lifetimes;
2463
2464 auto isLifetimeStart = [](Instruction* I) {
2465 if (auto* II = dyn_cast<IntrinsicInst>(I))
2466 return II->getIntrinsicID() == Intrinsic::lifetime_start;
2467 return false;
2468 };
2469
2470 auto collectLifetimeStart = [&](Instruction *U, AllocaInst *AI) {
2471 if (isLifetimeStart(U)) {
2472 Lifetimes.push_back(U);
2473 return true;
2474 }
2475 if (!U->hasOneUse() || U->stripPointerCasts() != AI)
2476 return false;
2477 if (isLifetimeStart(U->user_back())) {
2478 Lifetimes.push_back(U->user_back());
2479 return true;
2480 }
2481 return false;
2482 };
2483
2484 for (User *U : AI->users()) {
2485 Instruction *UI = cast<Instruction>(U);
2486 // For all users except lifetime.start markers, if they are all
2487 // dominated by one of the basic blocks and do not cross
2488 // suspend points as well, then there is no need to spill the
2489 // instruction.
2490 if (!DT.dominates(DomBB, UI->getParent()) ||
2491 Checker.isDefinitionAcrossSuspend(DomBB, UI)) {
2492 // Skip lifetime.start, GEP and bitcast used by lifetime.start
2493 // markers.
2494 if (collectLifetimeStart(UI, AI))
2495 continue;
2496 Valid = false;
2497 break;
2498 }
2499 }
2500 // Sink the lifetime.start markers into the dominating block when they are
2501 // the only users found outside of it.
2502 if (Valid && Lifetimes.size() != 0) {
2503 // May be AI itself, when the type of AI is i8*
2504 auto *NewBitCast = [&](AllocaInst *AI) -> Value* {
2505 if (isa<AllocaInst>(Lifetimes[0]->getOperand(1)))
2506 return AI;
2507 auto *Int8PtrTy = Type::getInt8PtrTy(F.getContext());
2508 return CastInst::Create(Instruction::BitCast, AI, Int8PtrTy, "",
2509 DomBB->getTerminator());
2510 }(AI);
2511
2512 auto *NewLifetime = Lifetimes[0]->clone();
2513 NewLifetime->replaceUsesOfWith(NewLifetime->getOperand(1), NewBitCast);
2514 NewLifetime->insertBefore(DomBB->getTerminator());
2515
2516 // The original lifetime.start markers that were outside are no longer necessary.
2517 for (Instruction *S : Lifetimes)
2518 S->eraseFromParent();
2519
2520 break;
2521 }
2522 }
2523 }
2524 }
2525
2526 static void collectFrameAllocas(Function &F, coro::Shape &Shape,
2527 const SuspendCrossingInfo &Checker,
2528 SmallVectorImpl<AllocaInfo> &Allocas) {
2529 for (Instruction &I : instructions(F)) {
2530 auto *AI = dyn_cast<AllocaInst>(&I);
2531 if (!AI)
2532 continue;
2533 // The PromiseAlloca will be specially handled since it needs to be in a
2534 // fixed position in the frame.
2535 if (AI == Shape.SwitchLowering.PromiseAlloca) {
2536 continue;
2537 }
2538 DominatorTree DT(F);
2539 // The code that uses the lifetime.start intrinsic does not work for functions
2540 // with loops that have no exit. Disable it on ABIs we know to generate such
2541 // code.
2542 bool ShouldUseLifetimeStartInfo =
2543 (Shape.ABI != coro::ABI::Async && Shape.ABI != coro::ABI::Retcon &&
2544 Shape.ABI != coro::ABI::RetconOnce);
2545 AllocaUseVisitor Visitor{F.getParent()->getDataLayout(), DT,
2546 *Shape.CoroBegin, Checker,
2547 ShouldUseLifetimeStartInfo};
2548 Visitor.visitPtr(*AI);
2549 if (!Visitor.getShouldLiveOnFrame())
2550 continue;
2551 Allocas.emplace_back(AI, Visitor.getAliasesCopy(),
2552 Visitor.getMayWriteBeforeCoroBegin());
2553 }
2554 }
2555
2556 void coro::salvageDebugInfo(
2557 SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> &DbgPtrAllocaCache,
2558 DbgVariableIntrinsic *DVI, bool OptimizeFrame) {
2559 Function *F = DVI->getFunction();
2560 IRBuilder<> Builder(F->getContext());
2561 auto InsertPt = F->getEntryBlock().getFirstInsertionPt();
2562 while (isa<IntrinsicInst>(InsertPt))
2563 ++InsertPt;
2564 Builder.SetInsertPoint(&F->getEntryBlock(), InsertPt);
2565 DIExpression *Expr = DVI->getExpression();
2566 // Follow the pointer arithmetic all the way to the incoming
2567 // function argument and convert into a DIExpression.
2568 bool SkipOutermostLoad = !isa<DbgValueInst>(DVI);
2569 Value *Storage = DVI->getVariableLocationOp(0);
2570 Value *OriginalStorage = Storage;
2571
2572 while (auto *Inst = dyn_cast_or_null<Instruction>(Storage)) {
2573 if (auto *LdInst = dyn_cast<LoadInst>(Inst)) {
2574 Storage = LdInst->getOperand(0);
2575 // FIXME: This is a heuristic that works around the fact that
2576 // LLVM IR debug intrinsics cannot yet distinguish between
2577 // memory and value locations: Because a dbg.declare(alloca) is
2578 // implicitly a memory location no DW_OP_deref operation for the
2579 // last direct load from an alloca is necessary. This condition
2580 // effectively drops the *last* DW_OP_deref in the expression.
2581 if (!SkipOutermostLoad)
2582 Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
2583 } else if (auto *StInst = dyn_cast<StoreInst>(Inst)) {
2584 Storage = StInst->getOperand(0);
2585 } else {
2586 SmallVector<uint64_t, 16> Ops;
2587 SmallVector<Value *, 0> AdditionalValues;
2588 Value *Op = llvm::salvageDebugInfoImpl(
2589 *Inst, Expr ? Expr->getNumLocationOperands() : 0, Ops,
2590 AdditionalValues);
2591 if (!Op || !AdditionalValues.empty()) {
2592 // If salvaging failed or salvaging produced more than one location
2593 // operand, give up.
2594 break;
2595 }
2596 Storage = Op;
2597 Expr = DIExpression::appendOpsToArg(Expr, Ops, 0, /*StackValue*/ false);
2598 }
2599 SkipOutermostLoad = false;
2600 }
2601 if (!Storage)
2602 return;
2603
2604 // Store a pointer to the coroutine frame object in an alloca so it
2605 // is available throughout the function when producing unoptimized
2606 // code. Extending the lifetime this way is correct because the
2607 // variable has been declared by a dbg.declare intrinsic.
2608 //
2609 // Avoid creating the alloca when optimizing, since it would be eliminated by
2610 // optimization passes and the corresponding dbg.declares would become invalid.
2611 if (!OptimizeFrame)
2612 if (auto *Arg = dyn_cast<llvm::Argument>(Storage)) {
2613 auto &Cached = DbgPtrAllocaCache[Storage];
2614 if (!Cached) {
2615 Cached = Builder.CreateAlloca(Storage->getType(), 0, nullptr,
2616 Arg->getName() + ".debug");
2617 Builder.CreateStore(Storage, Cached);
2618 }
2619 Storage = Cached;
2620 // FIXME: LLVM lacks nuanced semantics to differentiate between
2621 // memory and direct locations at the IR level. The backend will
2622 // turn a dbg.declare(alloca, ..., DIExpression()) into a memory
2623 // location. Thus, if there are deref and offset operations in the
2624 // expression, we need to add a DW_OP_deref at the *start* of the
2625 // expression to first load the contents of the alloca before
2626 // adjusting it with the expression.
2627 Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
2628 }
2629
2630 DVI->replaceVariableLocationOp(OriginalStorage, Storage);
2631 DVI->setExpression(Expr);
2632 // We only hoist dbg.declare today since it doesn't make sense to hoist
2633 // dbg.value or dbg.addr; they do not have the same function-wide
2634 // guarantees that dbg.declare does.
2635 if (!isa<DbgValueInst>(DVI) && !isa<DbgAddrIntrinsic>(DVI)) {
2636 if (auto *II = dyn_cast<InvokeInst>(Storage))
2637 DVI->moveBefore(II->getNormalDest()->getFirstNonPHI());
2638 else if (auto *CBI = dyn_cast<CallBrInst>(Storage))
2639 DVI->moveBefore(CBI->getDefaultDest()->getFirstNonPHI());
2640 else if (auto *InsertPt = dyn_cast<Instruction>(Storage)) {
2641 assert(!InsertPt->isTerminator() &&
2642 "Unimaged terminator that could return a storage.");
2643 DVI->moveAfter(InsertPt);
2644 } else if (isa<Argument>(Storage))
2645 DVI->moveAfter(F->getEntryBlock().getFirstNonPHI());
2646 }
2647 }
2648
2649 void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
2650 // Don't eliminate swifterror in async functions that won't be split.
2651 if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
2652 eliminateSwiftError(F, Shape);
2653
2654 if (Shape.ABI == coro::ABI::Switch &&
2655 Shape.SwitchLowering.PromiseAlloca) {
2656 Shape.getSwitchCoroId()->clearPromise();
2657 }
2658
2659 // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
2660 // intrinsics are in their own blocks to simplify the logic of building up
2661 // SuspendCrossing data.
2662 for (auto *CSI : Shape.CoroSuspends) {
2663 if (auto *Save = CSI->getCoroSave())
2664 splitAround(Save, "CoroSave");
2665 splitAround(CSI, "CoroSuspend");
2666 }
2667
2668 // Put CoroEnds into their own blocks.
2669 for (AnyCoroEndInst *CE : Shape.CoroEnds) {
2670 splitAround(CE, "CoroEnd");
2671
2672 // Emit the musttail call function in a new block before the CoroEnd.
2673 // We do this here so that the right suspend crossing info is computed for
2674 // the uses of the musttail call. (Arguments to the coro.end
2675 // instruction would otherwise be ignored.)
2676 if (auto *AsyncEnd = dyn_cast<CoroAsyncEndInst>(CE)) {
2677 auto *MustTailCallFn = AsyncEnd->getMustTailCallFunction();
2678 if (!MustTailCallFn)
2679 continue;
2680 IRBuilder<> Builder(AsyncEnd);
2681 SmallVector<Value *, 8> Args(AsyncEnd->args());
2682 auto Arguments = ArrayRef<Value *>(Args).drop_front(3);
2683 auto *Call = createMustTailCall(AsyncEnd->getDebugLoc(), MustTailCallFn,
2684 Arguments, Builder);
2685 splitAround(Call, "MustTailCall.Before.CoroEnd");
2686 }
2687 }
2688
2689 // Later code makes structural assumptions about single-predecessor PHIs, e.g.
2690 // that they are not live across a suspend point.
2691 cleanupSinglePredPHIs(F);
2692
2693 // Transform multi-edge PHI nodes so that any value feeding into a PHI will
2694 // never have its definition separated from the PHI by a suspend point.
2695 rewritePHIs(F);
2696
2697 // Build suspend crossing info.
2698 SuspendCrossingInfo Checker(F, Shape);
2699
2700 IRBuilder<> Builder(F.getContext());
2701 FrameDataInfo FrameData;
2702 SmallVector<CoroAllocaAllocInst*, 4> LocalAllocas;
2703 SmallVector<Instruction*, 4> DeadInstructions;
2704
2705 {
2706 SpillInfo Spills;
2707 for (int Repeat = 0; Repeat < 4; ++Repeat) {
2708 // See if there are materializable instructions across suspend points.
2709 for (Instruction &I : instructions(F))
2710 if (materializable(I)) {
2711 for (User *U : I.users())
2712 if (Checker.isDefinitionAcrossSuspend(I, U))
2713 Spills[&I].push_back(cast<Instruction>(U));
2714 }
2715
2716 if (Spills.empty())
2717 break;
2718
2719 // Rewrite materializable instructions to be materialized at the use
2720 // point.
2721 LLVM_DEBUG(dumpSpills("Materializations", Spills));
2722 rewriteMaterializableInstructions(Builder, Spills);
2723 Spills.clear();
2724 }
2725 }
2726
2727 if (Shape.ABI != coro::ABI::Async && Shape.ABI != coro::ABI::Retcon &&
2728 Shape.ABI != coro::ABI::RetconOnce)
2729 sinkLifetimeStartMarkers(F, Shape, Checker);
2730
2731 if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
2732 collectFrameAllocas(F, Shape, Checker, FrameData.Allocas);
2733 LLVM_DEBUG(dumpAllocas(FrameData.Allocas));
2734
2735 // Collect the spills for arguments and other non-materializable values.
2736 for (Argument &A : F.args())
2737 for (User *U : A.users())
2738 if (Checker.isDefinitionAcrossSuspend(A, U))
2739 FrameData.Spills[&A].push_back(cast<Instruction>(U));
2740
2741 for (Instruction &I : instructions(F)) {
2742 // Values returned from coroutine structure intrinsics should not be part
2743 // of the Coroutine Frame.
2744 if (isCoroutineStructureIntrinsic(I) || &I == Shape.CoroBegin)
2745 continue;
2746
2747 // The coroutine promise is always included in the coroutine frame; there is
2748 // no need to check for suspend crossing.
2749 if (Shape.ABI == coro::ABI::Switch &&
2750 Shape.SwitchLowering.PromiseAlloca == &I)
2751 continue;
2752
2753 // Handle alloca.alloc specially here.
2754 if (auto AI = dyn_cast<CoroAllocaAllocInst>(&I)) {
2755 // Check whether the alloca's lifetime is bounded by suspend points.
2756 if (isLocalAlloca(AI)) {
2757 LocalAllocas.push_back(AI);
2758 continue;
2759 }
2760
2761 // If not, do a quick rewrite of the alloca and then add spills of
2762 // the rewritten value. The rewrite doesn't invalidate anything in
2763 // Spills because the other alloca intrinsics have no other operands
2764 // besides AI, and it doesn't invalidate the iteration because we delay
2765 // erasing AI.
2766 auto Alloc = lowerNonLocalAlloca(AI, Shape, DeadInstructions);
2767
2768 for (User *U : Alloc->users()) {
2769 if (Checker.isDefinitionAcrossSuspend(*Alloc, U))
2770 FrameData.Spills[Alloc].push_back(cast<Instruction>(U));
2771 }
2772 continue;
2773 }
2774
2775 // Ignore alloca.get; we process this as part of coro.alloca.alloc.
2776 if (isa<CoroAllocaGetInst>(I))
2777 continue;
2778
2779 if (isa<AllocaInst>(I))
2780 continue;
2781
2782 for (User *U : I.users())
2783 if (Checker.isDefinitionAcrossSuspend(I, U)) {
2784 // We cannot spill a token.
2785 if (I.getType()->isTokenTy())
2786 report_fatal_error(
2787 "token definition is separated from the use by a suspend point");
2788 FrameData.Spills[&I].push_back(cast<Instruction>(U));
2789 }
2790 }
2791
2792 // We don't want the layout of the coroutine frame to be affected
2793 // by debug information, so we only salvage DbgValueInsts whose
2794 // values are already in the frame.
2795 // The dbg.values for allocas are handled specially.
2796 for (auto &Iter : FrameData.Spills) {
2797 auto *V = Iter.first;
2798 SmallVector<DbgValueInst *, 16> DVIs;
2799 findDbgValues(DVIs, V);
2800 for (DbgValueInst *DVI : DVIs)
2801 if (Checker.isDefinitionAcrossSuspend(*V, DVI))
2802 FrameData.Spills[V].push_back(DVI);
2803 }
2804
2805 LLVM_DEBUG(dumpSpills("Spills", FrameData.Spills));
2806 if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
2807 Shape.ABI == coro::ABI::Async)
2808 sinkSpillUsesAfterCoroBegin(F, FrameData, Shape.CoroBegin);
2809 Shape.FrameTy = buildFrameType(F, Shape, FrameData);
2810 createFramePtr(Shape);
2811 // For now, this works for C++ programs only.
2812 buildFrameDebugInfo(F, Shape, FrameData);
2813 insertSpills(FrameData, Shape);
2814 lowerLocalAllocas(LocalAllocas, DeadInstructions);
2815
2816 for (auto I : DeadInstructions)
2817 I->eraseFromParent();
2818 }
2819