1 //===- SROA.cpp - Scalar Replacement Of Aggregates ------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This transformation implements the well known scalar replacement of
10 /// aggregates transformation. It tries to identify promotable elements of an
11 /// aggregate alloca, and promote them to registers. It will also try to
12 /// convert uses of an element (or set of elements) of an alloca into a vector
13 /// or bitfield-style integer scalar if appropriate.
14 ///
15 /// It works to do this with minimal slicing of the alloca so that regions
16 /// which are merely transferred in and out of external memory remain unchanged
17 /// and are not decomposed to scalar code.
18 ///
19 /// Because this also performs alloca promotion, it can be thought of as also
20 /// serving the purpose of SSA formation. The algorithm iterates on the
21 /// function until all opportunities for promotion have been realized.
22 ///
23 //===----------------------------------------------------------------------===//
24
25 #include "llvm/Transforms/Scalar/SROA.h"
26 #include "llvm/ADT/APInt.h"
27 #include "llvm/ADT/ArrayRef.h"
28 #include "llvm/ADT/DenseMap.h"
29 #include "llvm/ADT/PointerIntPair.h"
30 #include "llvm/ADT/STLExtras.h"
31 #include "llvm/ADT/SetVector.h"
32 #include "llvm/ADT/SmallBitVector.h"
33 #include "llvm/ADT/SmallPtrSet.h"
34 #include "llvm/ADT/SmallVector.h"
35 #include "llvm/ADT/Statistic.h"
36 #include "llvm/ADT/StringRef.h"
37 #include "llvm/ADT/Twine.h"
38 #include "llvm/ADT/iterator.h"
39 #include "llvm/ADT/iterator_range.h"
40 #include "llvm/Analysis/AssumptionCache.h"
41 #include "llvm/Analysis/GlobalsModRef.h"
42 #include "llvm/Analysis/Loads.h"
43 #include "llvm/Analysis/PtrUseVisitor.h"
44 #include "llvm/Config/llvm-config.h"
45 #include "llvm/IR/BasicBlock.h"
46 #include "llvm/IR/Constant.h"
47 #include "llvm/IR/ConstantFolder.h"
48 #include "llvm/IR/Constants.h"
49 #include "llvm/IR/DIBuilder.h"
50 #include "llvm/IR/DataLayout.h"
51 #include "llvm/IR/DebugInfo.h"
52 #include "llvm/IR/DebugInfoMetadata.h"
53 #include "llvm/IR/DerivedTypes.h"
54 #include "llvm/IR/Dominators.h"
55 #include "llvm/IR/Function.h"
56 #include "llvm/IR/GetElementPtrTypeIterator.h"
57 #include "llvm/IR/GlobalAlias.h"
58 #include "llvm/IR/IRBuilder.h"
59 #include "llvm/IR/InstVisitor.h"
60 #include "llvm/IR/Instruction.h"
61 #include "llvm/IR/Instructions.h"
62 #include "llvm/IR/IntrinsicInst.h"
63 #include "llvm/IR/LLVMContext.h"
64 #include "llvm/IR/Metadata.h"
65 #include "llvm/IR/Module.h"
66 #include "llvm/IR/Operator.h"
67 #include "llvm/IR/PassManager.h"
68 #include "llvm/IR/Type.h"
69 #include "llvm/IR/Use.h"
70 #include "llvm/IR/User.h"
71 #include "llvm/IR/Value.h"
72 #include "llvm/InitializePasses.h"
73 #include "llvm/Pass.h"
74 #include "llvm/Support/Casting.h"
75 #include "llvm/Support/CommandLine.h"
76 #include "llvm/Support/Compiler.h"
77 #include "llvm/Support/Debug.h"
78 #include "llvm/Support/ErrorHandling.h"
79 #include "llvm/Support/raw_ostream.h"
80 #include "llvm/Transforms/Scalar.h"
81 #include "llvm/Transforms/Utils/Local.h"
82 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
83 #include <algorithm>
84 #include <cassert>
85 #include <cstddef>
86 #include <cstdint>
87 #include <cstring>
88 #include <iterator>
89 #include <string>
90 #include <tuple>
91 #include <utility>
92 #include <vector>
93
94 using namespace llvm;
95 using namespace llvm::sroa;
96
97 #define DEBUG_TYPE "sroa"
98
99 STATISTIC(NumAllocasAnalyzed, "Number of allocas analyzed for replacement");
100 STATISTIC(NumAllocaPartitions, "Number of alloca partitions formed");
101 STATISTIC(MaxPartitionsPerAlloca, "Maximum number of partitions per alloca");
102 STATISTIC(NumAllocaPartitionUses, "Number of alloca partition uses rewritten");
103 STATISTIC(MaxUsesPerAllocaPartition, "Maximum number of uses of a partition");
104 STATISTIC(NumNewAllocas, "Number of new, smaller allocas introduced");
105 STATISTIC(NumPromoted, "Number of allocas promoted to SSA values");
106 STATISTIC(NumLoadsSpeculated, "Number of loads speculated to allow promotion");
107 STATISTIC(NumDeleted, "Number of instructions deleted");
108 STATISTIC(NumVectorized, "Number of vectorized aggregates");
109
110 /// Hidden option to experiment with completely strict handling of inbounds
111 /// GEPs.
112 static cl::opt<bool> SROAStrictInbounds("sroa-strict-inbounds", cl::init(false),
113 cl::Hidden);
114
115 namespace {
116
117 /// A custom IRBuilder inserter which prefixes all names, but only in
118 /// Assert builds.
119 class IRBuilderPrefixedInserter final : public IRBuilderDefaultInserter {
120 std::string Prefix;
121
getNameWithPrefix(const Twine & Name) const122 Twine getNameWithPrefix(const Twine &Name) const {
123 return Name.isTriviallyEmpty() ? Name : Prefix + Name;
124 }
125
126 public:
SetNamePrefix(const Twine & P)127 void SetNamePrefix(const Twine &P) { Prefix = P.str(); }
128
InsertHelper(Instruction * I,const Twine & Name,BasicBlock * BB,BasicBlock::iterator InsertPt) const129 void InsertHelper(Instruction *I, const Twine &Name, BasicBlock *BB,
130 BasicBlock::iterator InsertPt) const override {
131 IRBuilderDefaultInserter::InsertHelper(I, getNameWithPrefix(Name), BB,
132 InsertPt);
133 }
134 };
135
136 /// Provide a type for IRBuilder that drops names in release builds.
137 using IRBuilderTy = IRBuilder<ConstantFolder, IRBuilderPrefixedInserter>;
138
139 /// A used slice of an alloca.
140 ///
141 /// This structure represents a slice of an alloca used by some instruction. It
142 /// stores both the begin and end offsets of this use, a pointer to the use
143 /// itself, and a flag indicating whether we can classify the use as splittable
144 /// or not when forming partitions of the alloca.
145 class Slice {
146 /// The beginning offset of the range.
147 uint64_t BeginOffset = 0;
148
149 /// The ending offset, not included in the range.
150 uint64_t EndOffset = 0;
151
152 /// Storage for both the use of this slice and whether it can be
153 /// split.
154 PointerIntPair<Use *, 1, bool> UseAndIsSplittable;
155
156 public:
157 Slice() = default;
158
Slice(uint64_t BeginOffset,uint64_t EndOffset,Use * U,bool IsSplittable)159 Slice(uint64_t BeginOffset, uint64_t EndOffset, Use *U, bool IsSplittable)
160 : BeginOffset(BeginOffset), EndOffset(EndOffset),
161 UseAndIsSplittable(U, IsSplittable) {}
162
beginOffset() const163 uint64_t beginOffset() const { return BeginOffset; }
endOffset() const164 uint64_t endOffset() const { return EndOffset; }
165
isSplittable() const166 bool isSplittable() const { return UseAndIsSplittable.getInt(); }
makeUnsplittable()167 void makeUnsplittable() { UseAndIsSplittable.setInt(false); }
168
getUse() const169 Use *getUse() const { return UseAndIsSplittable.getPointer(); }
170
isDead() const171 bool isDead() const { return getUse() == nullptr; }
kill()172 void kill() { UseAndIsSplittable.setPointer(nullptr); }
173
174 /// Support for ordering ranges.
175 ///
176 /// This provides an ordering over ranges such that start offsets are
177 /// always increasing, and within equal start offsets, the end offsets are
178 /// decreasing. Thus the spanning range comes first in a cluster with the
179 /// same start position.
operator <(const Slice & RHS) const180 bool operator<(const Slice &RHS) const {
181 if (beginOffset() < RHS.beginOffset())
182 return true;
183 if (beginOffset() > RHS.beginOffset())
184 return false;
185 if (isSplittable() != RHS.isSplittable())
186 return !isSplittable();
187 if (endOffset() > RHS.endOffset())
188 return true;
189 return false;
190 }
191
192 /// Support comparison with a single offset to allow binary searches.
operator <(const Slice & LHS,uint64_t RHSOffset)193 friend LLVM_ATTRIBUTE_UNUSED bool operator<(const Slice &LHS,
194 uint64_t RHSOffset) {
195 return LHS.beginOffset() < RHSOffset;
196 }
operator <(uint64_t LHSOffset,const Slice & RHS)197 friend LLVM_ATTRIBUTE_UNUSED bool operator<(uint64_t LHSOffset,
198 const Slice &RHS) {
199 return LHSOffset < RHS.beginOffset();
200 }
201
operator ==(const Slice & RHS) const202 bool operator==(const Slice &RHS) const {
203 return isSplittable() == RHS.isSplittable() &&
204 beginOffset() == RHS.beginOffset() && endOffset() == RHS.endOffset();
205 }
operator !=(const Slice & RHS) const206 bool operator!=(const Slice &RHS) const { return !operator==(RHS); }
207 };
208
209 } // end anonymous namespace
210
211 /// Representation of the alloca slices.
212 ///
213 /// This class represents the slices of an alloca which are formed by its
214 /// various uses. If a pointer escapes, we can't fully build a representation
215 /// for the slices used and we reflect that in this structure. The uses are
216 /// stored, sorted by increasing beginning offset and with unsplittable slices
217 /// starting at a particular offset before splittable slices.
218 class llvm::sroa::AllocaSlices {
219 public:
220 /// Construct the slices of a particular alloca.
221 AllocaSlices(const DataLayout &DL, AllocaInst &AI);
222
223 /// Test whether a pointer to the allocation escapes our analysis.
224 ///
225 /// If this is true, the slices are never fully built and should be
226 /// ignored.
isEscaped() const227 bool isEscaped() const { return PointerEscapingInstr; }
228
229 /// Support for iterating over the slices.
230 /// @{
231 using iterator = SmallVectorImpl<Slice>::iterator;
232 using range = iterator_range<iterator>;
233
begin()234 iterator begin() { return Slices.begin(); }
end()235 iterator end() { return Slices.end(); }
236
237 using const_iterator = SmallVectorImpl<Slice>::const_iterator;
238 using const_range = iterator_range<const_iterator>;
239
begin() const240 const_iterator begin() const { return Slices.begin(); }
end() const241 const_iterator end() const { return Slices.end(); }
242 /// @}
243
244 /// Erase a range of slices.
erase(iterator Start,iterator Stop)245 void erase(iterator Start, iterator Stop) { Slices.erase(Start, Stop); }
246
247 /// Insert new slices for this alloca.
248 ///
249 /// This moves the slices into the alloca's slices collection, and re-sorts
250 /// everything so that the usual ordering properties of the alloca's slices
251 /// hold.
insert(ArrayRef<Slice> NewSlices)252 void insert(ArrayRef<Slice> NewSlices) {
253 int OldSize = Slices.size();
254 Slices.append(NewSlices.begin(), NewSlices.end());
255 auto SliceI = Slices.begin() + OldSize;
256 llvm::sort(SliceI, Slices.end());
257 std::inplace_merge(Slices.begin(), SliceI, Slices.end());
258 }
259
260 // Forward declare the iterator and range accessor for walking the
261 // partitions.
262 class partition_iterator;
263 iterator_range<partition_iterator> partitions();
264
265 /// Access the dead users for this alloca.
getDeadUsers() const266 ArrayRef<Instruction *> getDeadUsers() const { return DeadUsers; }
267
268 /// Access Uses that should be dropped if the alloca is promotable.
getDeadUsesIfPromotable() const269 ArrayRef<Use *> getDeadUsesIfPromotable() const {
270 return DeadUseIfPromotable;
271 }
272
273 /// Access the dead operands referring to this alloca.
274 ///
275 /// These are operands which have cannot actually be used to refer to the
276 /// alloca as they are outside its range and the user doesn't correct for
277 /// that. These mostly consist of PHI node inputs and the like which we just
278 /// need to replace with undef.
getDeadOperands() const279 ArrayRef<Use *> getDeadOperands() const { return DeadOperands; }
280
281 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
282 void print(raw_ostream &OS, const_iterator I, StringRef Indent = " ") const;
283 void printSlice(raw_ostream &OS, const_iterator I,
284 StringRef Indent = " ") const;
285 void printUse(raw_ostream &OS, const_iterator I,
286 StringRef Indent = " ") const;
287 void print(raw_ostream &OS) const;
288 void dump(const_iterator I) const;
289 void dump() const;
290 #endif
291
292 private:
293 template <typename DerivedT, typename RetT = void> class BuilderBase;
294 class SliceBuilder;
295
296 friend class AllocaSlices::SliceBuilder;
297
298 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
299 /// Handle to alloca instruction to simplify method interfaces.
300 AllocaInst &AI;
301 #endif
302
303 /// The instruction responsible for this alloca not having a known set
304 /// of slices.
305 ///
306 /// When an instruction (potentially) escapes the pointer to the alloca, we
307 /// store a pointer to that here and abort trying to form slices of the
308 /// alloca. This will be null if the alloca slices are analyzed successfully.
309 Instruction *PointerEscapingInstr;
310
311 /// The slices of the alloca.
312 ///
313 /// We store a vector of the slices formed by uses of the alloca here. This
314 /// vector is sorted by increasing begin offset, and then the unsplittable
315 /// slices before the splittable ones. See the Slice inner class for more
316 /// details.
317 SmallVector<Slice, 8> Slices;
318
319 /// Instructions which will become dead if we rewrite the alloca.
320 ///
321 /// Note that these are not separated by slice. This is because we expect an
322 /// alloca to be completely rewritten or not rewritten at all. If rewritten,
323 /// all these instructions can simply be removed and replaced with poison as
324 /// they come from outside of the allocated space.
325 SmallVector<Instruction *, 8> DeadUsers;
326
327 /// Uses which will become dead if can promote the alloca.
328 SmallVector<Use *, 8> DeadUseIfPromotable;
329
330 /// Operands which will become dead if we rewrite the alloca.
331 ///
332 /// These are operands that in their particular use can be replaced with
333 /// poison when we rewrite the alloca. These show up in out-of-bounds inputs
334 /// to PHI nodes and the like. They aren't entirely dead (there might be
335 /// a GEP back into the bounds using it elsewhere) and nor is the PHI, but we
336 /// want to swap this particular input for poison to simplify the use lists of
337 /// the alloca.
338 SmallVector<Use *, 8> DeadOperands;
339 };
340
341 /// A partition of the slices.
342 ///
343 /// An ephemeral representation for a range of slices which can be viewed as
344 /// a partition of the alloca. This range represents a span of the alloca's
345 /// memory which cannot be split, and provides access to all of the slices
346 /// overlapping some part of the partition.
347 ///
348 /// Objects of this type are produced by traversing the alloca's slices, but
349 /// are only ephemeral and not persistent.
350 class llvm::sroa::Partition {
351 private:
352 friend class AllocaSlices;
353 friend class AllocaSlices::partition_iterator;
354
355 using iterator = AllocaSlices::iterator;
356
357 /// The beginning and ending offsets of the alloca for this
358 /// partition.
359 uint64_t BeginOffset = 0, EndOffset = 0;
360
361 /// The start and end iterators of this partition.
362 iterator SI, SJ;
363
364 /// A collection of split slice tails overlapping the partition.
365 SmallVector<Slice *, 4> SplitTails;
366
367 /// Raw constructor builds an empty partition starting and ending at
368 /// the given iterator.
Partition(iterator SI)369 Partition(iterator SI) : SI(SI), SJ(SI) {}
370
371 public:
372 /// The start offset of this partition.
373 ///
374 /// All of the contained slices start at or after this offset.
beginOffset() const375 uint64_t beginOffset() const { return BeginOffset; }
376
377 /// The end offset of this partition.
378 ///
379 /// All of the contained slices end at or before this offset.
endOffset() const380 uint64_t endOffset() const { return EndOffset; }
381
382 /// The size of the partition.
383 ///
384 /// Note that this can never be zero.
size() const385 uint64_t size() const {
386 assert(BeginOffset < EndOffset && "Partitions must span some bytes!");
387 return EndOffset - BeginOffset;
388 }
389
390 /// Test whether this partition contains no slices, and merely spans
391 /// a region occupied by split slices.
empty() const392 bool empty() const { return SI == SJ; }
393
394 /// \name Iterate slices that start within the partition.
395 /// These may be splittable or unsplittable. They have a begin offset >= the
396 /// partition begin offset.
397 /// @{
398 // FIXME: We should probably define a "concat_iterator" helper and use that
399 // to stitch together pointee_iterators over the split tails and the
400 // contiguous iterators of the partition. That would give a much nicer
401 // interface here. We could then additionally expose filtered iterators for
402 // split, unsplit, and unsplittable splices based on the usage patterns.
begin() const403 iterator begin() const { return SI; }
end() const404 iterator end() const { return SJ; }
405 /// @}
406
407 /// Get the sequence of split slice tails.
408 ///
409 /// These tails are of slices which start before this partition but are
410 /// split and overlap into the partition. We accumulate these while forming
411 /// partitions.
splitSliceTails() const412 ArrayRef<Slice *> splitSliceTails() const { return SplitTails; }
413 };
414
415 /// An iterator over partitions of the alloca's slices.
416 ///
417 /// This iterator implements the core algorithm for partitioning the alloca's
418 /// slices. It is a forward iterator as we don't support backtracking for
419 /// efficiency reasons, and re-use a single storage area to maintain the
420 /// current set of split slices.
421 ///
422 /// It is templated on the slice iterator type to use so that it can operate
423 /// with either const or non-const slice iterators.
424 class AllocaSlices::partition_iterator
425 : public iterator_facade_base<partition_iterator, std::forward_iterator_tag,
426 Partition> {
427 friend class AllocaSlices;
428
429 /// Most of the state for walking the partitions is held in a class
430 /// with a nice interface for examining them.
431 Partition P;
432
433 /// We need to keep the end of the slices to know when to stop.
434 AllocaSlices::iterator SE;
435
436 /// We also need to keep track of the maximum split end offset seen.
437 /// FIXME: Do we really?
438 uint64_t MaxSplitSliceEndOffset = 0;
439
440 /// Sets the partition to be empty at given iterator, and sets the
441 /// end iterator.
partition_iterator(AllocaSlices::iterator SI,AllocaSlices::iterator SE)442 partition_iterator(AllocaSlices::iterator SI, AllocaSlices::iterator SE)
443 : P(SI), SE(SE) {
444 // If not already at the end, advance our state to form the initial
445 // partition.
446 if (SI != SE)
447 advance();
448 }
449
450 /// Advance the iterator to the next partition.
451 ///
452 /// Requires that the iterator not be at the end of the slices.
advance()453 void advance() {
454 assert((P.SI != SE || !P.SplitTails.empty()) &&
455 "Cannot advance past the end of the slices!");
456
457 // Clear out any split uses which have ended.
458 if (!P.SplitTails.empty()) {
459 if (P.EndOffset >= MaxSplitSliceEndOffset) {
460 // If we've finished all splits, this is easy.
461 P.SplitTails.clear();
462 MaxSplitSliceEndOffset = 0;
463 } else {
464 // Remove the uses which have ended in the prior partition. This
465 // cannot change the max split slice end because we just checked that
466 // the prior partition ended prior to that max.
467 llvm::erase_if(P.SplitTails,
468 [&](Slice *S) { return S->endOffset() <= P.EndOffset; });
469 assert(llvm::any_of(P.SplitTails,
470 [&](Slice *S) {
471 return S->endOffset() == MaxSplitSliceEndOffset;
472 }) &&
473 "Could not find the current max split slice offset!");
474 assert(llvm::all_of(P.SplitTails,
475 [&](Slice *S) {
476 return S->endOffset() <= MaxSplitSliceEndOffset;
477 }) &&
478 "Max split slice end offset is not actually the max!");
479 }
480 }
481
482 // If P.SI is already at the end, then we've cleared the split tail and
483 // now have an end iterator.
484 if (P.SI == SE) {
485 assert(P.SplitTails.empty() && "Failed to clear the split slices!");
486 return;
487 }
488
489 // If we had a non-empty partition previously, set up the state for
490 // subsequent partitions.
491 if (P.SI != P.SJ) {
492 // Accumulate all the splittable slices which started in the old
493 // partition into the split list.
494 for (Slice &S : P)
495 if (S.isSplittable() && S.endOffset() > P.EndOffset) {
496 P.SplitTails.push_back(&S);
497 MaxSplitSliceEndOffset =
498 std::max(S.endOffset(), MaxSplitSliceEndOffset);
499 }
500
501 // Start from the end of the previous partition.
502 P.SI = P.SJ;
503
504 // If P.SI is now at the end, we at most have a tail of split slices.
505 if (P.SI == SE) {
506 P.BeginOffset = P.EndOffset;
507 P.EndOffset = MaxSplitSliceEndOffset;
508 return;
509 }
510
511 // If the we have split slices and the next slice is after a gap and is
512 // not splittable immediately form an empty partition for the split
513 // slices up until the next slice begins.
514 if (!P.SplitTails.empty() && P.SI->beginOffset() != P.EndOffset &&
515 !P.SI->isSplittable()) {
516 P.BeginOffset = P.EndOffset;
517 P.EndOffset = P.SI->beginOffset();
518 return;
519 }
520 }
521
522 // OK, we need to consume new slices. Set the end offset based on the
523 // current slice, and step SJ past it. The beginning offset of the
524 // partition is the beginning offset of the next slice unless we have
525 // pre-existing split slices that are continuing, in which case we begin
526 // at the prior end offset.
527 P.BeginOffset = P.SplitTails.empty() ? P.SI->beginOffset() : P.EndOffset;
528 P.EndOffset = P.SI->endOffset();
529 ++P.SJ;
530
531 // There are two strategies to form a partition based on whether the
532 // partition starts with an unsplittable slice or a splittable slice.
533 if (!P.SI->isSplittable()) {
534 // When we're forming an unsplittable region, it must always start at
535 // the first slice and will extend through its end.
536 assert(P.BeginOffset == P.SI->beginOffset());
537
538 // Form a partition including all of the overlapping slices with this
539 // unsplittable slice.
540 while (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset) {
541 if (!P.SJ->isSplittable())
542 P.EndOffset = std::max(P.EndOffset, P.SJ->endOffset());
543 ++P.SJ;
544 }
545
546 // We have a partition across a set of overlapping unsplittable
547 // partitions.
548 return;
549 }
550
551 // If we're starting with a splittable slice, then we need to form
552 // a synthetic partition spanning it and any other overlapping splittable
553 // splices.
554 assert(P.SI->isSplittable() && "Forming a splittable partition!");
555
556 // Collect all of the overlapping splittable slices.
557 while (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset &&
558 P.SJ->isSplittable()) {
559 P.EndOffset = std::max(P.EndOffset, P.SJ->endOffset());
560 ++P.SJ;
561 }
562
563 // Back upiP.EndOffset if we ended the span early when encountering an
564 // unsplittable slice. This synthesizes the early end offset of
565 // a partition spanning only splittable slices.
566 if (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset) {
567 assert(!P.SJ->isSplittable());
568 P.EndOffset = P.SJ->beginOffset();
569 }
570 }
571
572 public:
operator ==(const partition_iterator & RHS) const573 bool operator==(const partition_iterator &RHS) const {
574 assert(SE == RHS.SE &&
575 "End iterators don't match between compared partition iterators!");
576
577 // The observed positions of partitions is marked by the P.SI iterator and
578 // the emptiness of the split slices. The latter is only relevant when
579 // P.SI == SE, as the end iterator will additionally have an empty split
580 // slices list, but the prior may have the same P.SI and a tail of split
581 // slices.
582 if (P.SI == RHS.P.SI && P.SplitTails.empty() == RHS.P.SplitTails.empty()) {
583 assert(P.SJ == RHS.P.SJ &&
584 "Same set of slices formed two different sized partitions!");
585 assert(P.SplitTails.size() == RHS.P.SplitTails.size() &&
586 "Same slice position with differently sized non-empty split "
587 "slice tails!");
588 return true;
589 }
590 return false;
591 }
592
operator ++()593 partition_iterator &operator++() {
594 advance();
595 return *this;
596 }
597
operator *()598 Partition &operator*() { return P; }
599 };
600
601 /// A forward range over the partitions of the alloca's slices.
602 ///
603 /// This accesses an iterator range over the partitions of the alloca's
604 /// slices. It computes these partitions on the fly based on the overlapping
605 /// offsets of the slices and the ability to split them. It will visit "empty"
606 /// partitions to cover regions of the alloca only accessed via split
607 /// slices.
partitions()608 iterator_range<AllocaSlices::partition_iterator> AllocaSlices::partitions() {
609 return make_range(partition_iterator(begin(), end()),
610 partition_iterator(end(), end()));
611 }
612
foldSelectInst(SelectInst & SI)613 static Value *foldSelectInst(SelectInst &SI) {
614 // If the condition being selected on is a constant or the same value is
615 // being selected between, fold the select. Yes this does (rarely) happen
616 // early on.
617 if (ConstantInt *CI = dyn_cast<ConstantInt>(SI.getCondition()))
618 return SI.getOperand(1 + CI->isZero());
619 if (SI.getOperand(1) == SI.getOperand(2))
620 return SI.getOperand(1);
621
622 return nullptr;
623 }
624
625 /// A helper that folds a PHI node or a select.
foldPHINodeOrSelectInst(Instruction & I)626 static Value *foldPHINodeOrSelectInst(Instruction &I) {
627 if (PHINode *PN = dyn_cast<PHINode>(&I)) {
628 // If PN merges together the same value, return that value.
629 return PN->hasConstantValue();
630 }
631 return foldSelectInst(cast<SelectInst>(I));
632 }
633
634 /// Builder for the alloca slices.
635 ///
636 /// This class builds a set of alloca slices by recursively visiting the uses
637 /// of an alloca and making a slice for each load and store at each offset.
638 class AllocaSlices::SliceBuilder : public PtrUseVisitor<SliceBuilder> {
639 friend class PtrUseVisitor<SliceBuilder>;
640 friend class InstVisitor<SliceBuilder>;
641
642 using Base = PtrUseVisitor<SliceBuilder>;
643
644 const uint64_t AllocSize;
645 AllocaSlices &AS;
646
647 SmallDenseMap<Instruction *, unsigned> MemTransferSliceMap;
648 SmallDenseMap<Instruction *, uint64_t> PHIOrSelectSizes;
649
650 /// Set to de-duplicate dead instructions found in the use walk.
651 SmallPtrSet<Instruction *, 4> VisitedDeadInsts;
652
653 public:
SliceBuilder(const DataLayout & DL,AllocaInst & AI,AllocaSlices & AS)654 SliceBuilder(const DataLayout &DL, AllocaInst &AI, AllocaSlices &AS)
655 : PtrUseVisitor<SliceBuilder>(DL),
656 AllocSize(DL.getTypeAllocSize(AI.getAllocatedType()).getFixedSize()),
657 AS(AS) {}
658
659 private:
markAsDead(Instruction & I)660 void markAsDead(Instruction &I) {
661 if (VisitedDeadInsts.insert(&I).second)
662 AS.DeadUsers.push_back(&I);
663 }
664
insertUse(Instruction & I,const APInt & Offset,uint64_t Size,bool IsSplittable=false)665 void insertUse(Instruction &I, const APInt &Offset, uint64_t Size,
666 bool IsSplittable = false) {
667 // Completely skip uses which have a zero size or start either before or
668 // past the end of the allocation.
669 if (Size == 0 || Offset.uge(AllocSize)) {
670 LLVM_DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @"
671 << Offset
672 << " which has zero size or starts outside of the "
673 << AllocSize << " byte alloca:\n"
674 << " alloca: " << AS.AI << "\n"
675 << " use: " << I << "\n");
676 return markAsDead(I);
677 }
678
679 uint64_t BeginOffset = Offset.getZExtValue();
680 uint64_t EndOffset = BeginOffset + Size;
681
682 // Clamp the end offset to the end of the allocation. Note that this is
683 // formulated to handle even the case where "BeginOffset + Size" overflows.
684 // This may appear superficially to be something we could ignore entirely,
685 // but that is not so! There may be widened loads or PHI-node uses where
686 // some instructions are dead but not others. We can't completely ignore
687 // them, and so have to record at least the information here.
688 assert(AllocSize >= BeginOffset); // Established above.
689 if (Size > AllocSize - BeginOffset) {
690 LLVM_DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @"
691 << Offset << " to remain within the " << AllocSize
692 << " byte alloca:\n"
693 << " alloca: " << AS.AI << "\n"
694 << " use: " << I << "\n");
695 EndOffset = AllocSize;
696 }
697
698 AS.Slices.push_back(Slice(BeginOffset, EndOffset, U, IsSplittable));
699 }
700
visitBitCastInst(BitCastInst & BC)701 void visitBitCastInst(BitCastInst &BC) {
702 if (BC.use_empty())
703 return markAsDead(BC);
704
705 return Base::visitBitCastInst(BC);
706 }
707
visitAddrSpaceCastInst(AddrSpaceCastInst & ASC)708 void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
709 if (ASC.use_empty())
710 return markAsDead(ASC);
711
712 return Base::visitAddrSpaceCastInst(ASC);
713 }
714
visitGetElementPtrInst(GetElementPtrInst & GEPI)715 void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
716 if (GEPI.use_empty())
717 return markAsDead(GEPI);
718
719 if (SROAStrictInbounds && GEPI.isInBounds()) {
720 // FIXME: This is a manually un-factored variant of the basic code inside
721 // of GEPs with checking of the inbounds invariant specified in the
722 // langref in a very strict sense. If we ever want to enable
723 // SROAStrictInbounds, this code should be factored cleanly into
724 // PtrUseVisitor, but it is easier to experiment with SROAStrictInbounds
725 // by writing out the code here where we have the underlying allocation
726 // size readily available.
727 APInt GEPOffset = Offset;
728 const DataLayout &DL = GEPI.getModule()->getDataLayout();
729 for (gep_type_iterator GTI = gep_type_begin(GEPI),
730 GTE = gep_type_end(GEPI);
731 GTI != GTE; ++GTI) {
732 ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
733 if (!OpC)
734 break;
735
736 // Handle a struct index, which adds its field offset to the pointer.
737 if (StructType *STy = GTI.getStructTypeOrNull()) {
738 unsigned ElementIdx = OpC->getZExtValue();
739 const StructLayout *SL = DL.getStructLayout(STy);
740 GEPOffset +=
741 APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx));
742 } else {
743 // For array or vector indices, scale the index by the size of the
744 // type.
745 APInt Index = OpC->getValue().sextOrTrunc(Offset.getBitWidth());
746 GEPOffset +=
747 Index *
748 APInt(Offset.getBitWidth(),
749 DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize());
750 }
751
752 // If this index has computed an intermediate pointer which is not
753 // inbounds, then the result of the GEP is a poison value and we can
754 // delete it and all uses.
755 if (GEPOffset.ugt(AllocSize))
756 return markAsDead(GEPI);
757 }
758 }
759
760 return Base::visitGetElementPtrInst(GEPI);
761 }
762
handleLoadOrStore(Type * Ty,Instruction & I,const APInt & Offset,uint64_t Size,bool IsVolatile)763 void handleLoadOrStore(Type *Ty, Instruction &I, const APInt &Offset,
764 uint64_t Size, bool IsVolatile) {
765 // We allow splitting of non-volatile loads and stores where the type is an
766 // integer type. These may be used to implement 'memcpy' or other "transfer
767 // of bits" patterns.
768 bool IsSplittable =
769 Ty->isIntegerTy() && !IsVolatile && DL.typeSizeEqualsStoreSize(Ty);
770
771 insertUse(I, Offset, Size, IsSplittable);
772 }
773
visitLoadInst(LoadInst & LI)774 void visitLoadInst(LoadInst &LI) {
775 assert((!LI.isSimple() || LI.getType()->isSingleValueType()) &&
776 "All simple FCA loads should have been pre-split");
777
778 if (!IsOffsetKnown)
779 return PI.setAborted(&LI);
780
781 if (LI.isVolatile() &&
782 LI.getPointerAddressSpace() != DL.getAllocaAddrSpace())
783 return PI.setAborted(&LI);
784
785 if (isa<ScalableVectorType>(LI.getType()))
786 return PI.setAborted(&LI);
787
788 uint64_t Size = DL.getTypeStoreSize(LI.getType()).getFixedSize();
789 return handleLoadOrStore(LI.getType(), LI, Offset, Size, LI.isVolatile());
790 }
791
visitStoreInst(StoreInst & SI)792 void visitStoreInst(StoreInst &SI) {
793 Value *ValOp = SI.getValueOperand();
794 if (ValOp == *U)
795 return PI.setEscapedAndAborted(&SI);
796 if (!IsOffsetKnown)
797 return PI.setAborted(&SI);
798
799 if (SI.isVolatile() &&
800 SI.getPointerAddressSpace() != DL.getAllocaAddrSpace())
801 return PI.setAborted(&SI);
802
803 if (isa<ScalableVectorType>(ValOp->getType()))
804 return PI.setAborted(&SI);
805
806 uint64_t Size = DL.getTypeStoreSize(ValOp->getType()).getFixedSize();
807
808 // If this memory access can be shown to *statically* extend outside the
809 // bounds of the allocation, it's behavior is undefined, so simply
810 // ignore it. Note that this is more strict than the generic clamping
811 // behavior of insertUse. We also try to handle cases which might run the
812 // risk of overflow.
813 // FIXME: We should instead consider the pointer to have escaped if this
814 // function is being instrumented for addressing bugs or race conditions.
815 if (Size > AllocSize || Offset.ugt(AllocSize - Size)) {
816 LLVM_DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte store @"
817 << Offset << " which extends past the end of the "
818 << AllocSize << " byte alloca:\n"
819 << " alloca: " << AS.AI << "\n"
820 << " use: " << SI << "\n");
821 return markAsDead(SI);
822 }
823
824 assert((!SI.isSimple() || ValOp->getType()->isSingleValueType()) &&
825 "All simple FCA stores should have been pre-split");
826 handleLoadOrStore(ValOp->getType(), SI, Offset, Size, SI.isVolatile());
827 }
828
visitMemSetInst(MemSetInst & II)829 void visitMemSetInst(MemSetInst &II) {
830 assert(II.getRawDest() == *U && "Pointer use is not the destination?");
831 ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
832 if ((Length && Length->getValue() == 0) ||
833 (IsOffsetKnown && Offset.uge(AllocSize)))
834 // Zero-length mem transfer intrinsics can be ignored entirely.
835 return markAsDead(II);
836
837 if (!IsOffsetKnown)
838 return PI.setAborted(&II);
839
840 // Don't replace this with a store with a different address space. TODO:
841 // Use a store with the casted new alloca?
842 if (II.isVolatile() && II.getDestAddressSpace() != DL.getAllocaAddrSpace())
843 return PI.setAborted(&II);
844
845 insertUse(II, Offset, Length ? Length->getLimitedValue()
846 : AllocSize - Offset.getLimitedValue(),
847 (bool)Length);
848 }
849
visitMemTransferInst(MemTransferInst & II)850 void visitMemTransferInst(MemTransferInst &II) {
851 ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
852 if (Length && Length->getValue() == 0)
853 // Zero-length mem transfer intrinsics can be ignored entirely.
854 return markAsDead(II);
855
856 // Because we can visit these intrinsics twice, also check to see if the
857 // first time marked this instruction as dead. If so, skip it.
858 if (VisitedDeadInsts.count(&II))
859 return;
860
861 if (!IsOffsetKnown)
862 return PI.setAborted(&II);
863
864 // Don't replace this with a load/store with a different address space.
865 // TODO: Use a store with the casted new alloca?
866 if (II.isVolatile() &&
867 (II.getDestAddressSpace() != DL.getAllocaAddrSpace() ||
868 II.getSourceAddressSpace() != DL.getAllocaAddrSpace()))
869 return PI.setAborted(&II);
870
871 // This side of the transfer is completely out-of-bounds, and so we can
872 // nuke the entire transfer. However, we also need to nuke the other side
873 // if already added to our partitions.
874 // FIXME: Yet another place we really should bypass this when
875 // instrumenting for ASan.
876 if (Offset.uge(AllocSize)) {
877 SmallDenseMap<Instruction *, unsigned>::iterator MTPI =
878 MemTransferSliceMap.find(&II);
879 if (MTPI != MemTransferSliceMap.end())
880 AS.Slices[MTPI->second].kill();
881 return markAsDead(II);
882 }
883
884 uint64_t RawOffset = Offset.getLimitedValue();
885 uint64_t Size = Length ? Length->getLimitedValue() : AllocSize - RawOffset;
886
887 // Check for the special case where the same exact value is used for both
888 // source and dest.
889 if (*U == II.getRawDest() && *U == II.getRawSource()) {
890 // For non-volatile transfers this is a no-op.
891 if (!II.isVolatile())
892 return markAsDead(II);
893
894 return insertUse(II, Offset, Size, /*IsSplittable=*/false);
895 }
896
897 // If we have seen both source and destination for a mem transfer, then
898 // they both point to the same alloca.
899 bool Inserted;
900 SmallDenseMap<Instruction *, unsigned>::iterator MTPI;
901 std::tie(MTPI, Inserted) =
902 MemTransferSliceMap.insert(std::make_pair(&II, AS.Slices.size()));
903 unsigned PrevIdx = MTPI->second;
904 if (!Inserted) {
905 Slice &PrevP = AS.Slices[PrevIdx];
906
907 // Check if the begin offsets match and this is a non-volatile transfer.
908 // In that case, we can completely elide the transfer.
909 if (!II.isVolatile() && PrevP.beginOffset() == RawOffset) {
910 PrevP.kill();
911 return markAsDead(II);
912 }
913
914 // Otherwise we have an offset transfer within the same alloca. We can't
915 // split those.
916 PrevP.makeUnsplittable();
917 }
918
919 // Insert the use now that we've fixed up the splittable nature.
920 insertUse(II, Offset, Size, /*IsSplittable=*/Inserted && Length);
921
922 // Check that we ended up with a valid index in the map.
923 assert(AS.Slices[PrevIdx].getUse()->getUser() == &II &&
924 "Map index doesn't point back to a slice with this user.");
925 }
926
927 // Disable SRoA for any intrinsics except for lifetime invariants and
928 // invariant group.
929 // FIXME: What about debug intrinsics? This matches old behavior, but
930 // doesn't make sense.
visitIntrinsicInst(IntrinsicInst & II)931 void visitIntrinsicInst(IntrinsicInst &II) {
932 if (II.isDroppable()) {
933 AS.DeadUseIfPromotable.push_back(U);
934 return;
935 }
936
937 if (!IsOffsetKnown)
938 return PI.setAborted(&II);
939
940 if (II.isLifetimeStartOrEnd()) {
941 ConstantInt *Length = cast<ConstantInt>(II.getArgOperand(0));
942 uint64_t Size = std::min(AllocSize - Offset.getLimitedValue(),
943 Length->getLimitedValue());
944 insertUse(II, Offset, Size, true);
945 return;
946 }
947
948 if (II.isLaunderOrStripInvariantGroup()) {
949 enqueueUsers(II);
950 return;
951 }
952
953 Base::visitIntrinsicInst(II);
954 }
955
hasUnsafePHIOrSelectUse(Instruction * Root,uint64_t & Size)956 Instruction *hasUnsafePHIOrSelectUse(Instruction *Root, uint64_t &Size) {
957 // We consider any PHI or select that results in a direct load or store of
958 // the same offset to be a viable use for slicing purposes. These uses
959 // are considered unsplittable and the size is the maximum loaded or stored
960 // size.
961 SmallPtrSet<Instruction *, 4> Visited;
962 SmallVector<std::pair<Instruction *, Instruction *>, 4> Uses;
963 Visited.insert(Root);
964 Uses.push_back(std::make_pair(cast<Instruction>(*U), Root));
965 const DataLayout &DL = Root->getModule()->getDataLayout();
966 // If there are no loads or stores, the access is dead. We mark that as
967 // a size zero access.
968 Size = 0;
969 do {
970 Instruction *I, *UsedI;
971 std::tie(UsedI, I) = Uses.pop_back_val();
972
973 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
974 Size = std::max(Size,
975 DL.getTypeStoreSize(LI->getType()).getFixedSize());
976 continue;
977 }
978 if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
979 Value *Op = SI->getOperand(0);
980 if (Op == UsedI)
981 return SI;
982 Size = std::max(Size,
983 DL.getTypeStoreSize(Op->getType()).getFixedSize());
984 continue;
985 }
986
987 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
988 if (!GEP->hasAllZeroIndices())
989 return GEP;
990 } else if (!isa<BitCastInst>(I) && !isa<PHINode>(I) &&
991 !isa<SelectInst>(I) && !isa<AddrSpaceCastInst>(I)) {
992 return I;
993 }
994
995 for (User *U : I->users())
996 if (Visited.insert(cast<Instruction>(U)).second)
997 Uses.push_back(std::make_pair(I, cast<Instruction>(U)));
998 } while (!Uses.empty());
999
1000 return nullptr;
1001 }
1002
visitPHINodeOrSelectInst(Instruction & I)1003 void visitPHINodeOrSelectInst(Instruction &I) {
1004 assert(isa<PHINode>(I) || isa<SelectInst>(I));
1005 if (I.use_empty())
1006 return markAsDead(I);
1007
1008 // If this is a PHI node before a catchswitch, we cannot insert any non-PHI
1009 // instructions in this BB, which may be required during rewriting. Bail out
1010 // on these cases.
1011 if (isa<PHINode>(I) &&
1012 I.getParent()->getFirstInsertionPt() == I.getParent()->end())
1013 return PI.setAborted(&I);
1014
1015 // TODO: We could use simplifyInstruction here to fold PHINodes and
1016 // SelectInsts. However, doing so requires to change the current
1017 // dead-operand-tracking mechanism. For instance, suppose neither loading
1018 // from %U nor %other traps. Then "load (select undef, %U, %other)" does not
1019 // trap either. However, if we simply replace %U with undef using the
1020 // current dead-operand-tracking mechanism, "load (select undef, undef,
1021 // %other)" may trap because the select may return the first operand
1022 // "undef".
1023 if (Value *Result = foldPHINodeOrSelectInst(I)) {
1024 if (Result == *U)
1025 // If the result of the constant fold will be the pointer, recurse
1026 // through the PHI/select as if we had RAUW'ed it.
1027 enqueueUsers(I);
1028 else
1029 // Otherwise the operand to the PHI/select is dead, and we can replace
1030 // it with poison.
1031 AS.DeadOperands.push_back(U);
1032
1033 return;
1034 }
1035
1036 if (!IsOffsetKnown)
1037 return PI.setAborted(&I);
1038
1039 // See if we already have computed info on this node.
1040 uint64_t &Size = PHIOrSelectSizes[&I];
1041 if (!Size) {
1042 // This is a new PHI/Select, check for an unsafe use of it.
1043 if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&I, Size))
1044 return PI.setAborted(UnsafeI);
1045 }
1046
1047 // For PHI and select operands outside the alloca, we can't nuke the entire
1048 // phi or select -- the other side might still be relevant, so we special
1049 // case them here and use a separate structure to track the operands
1050 // themselves which should be replaced with poison.
1051 // FIXME: This should instead be escaped in the event we're instrumenting
1052 // for address sanitization.
1053 if (Offset.uge(AllocSize)) {
1054 AS.DeadOperands.push_back(U);
1055 return;
1056 }
1057
1058 insertUse(I, Offset, Size);
1059 }
1060
visitPHINode(PHINode & PN)1061 void visitPHINode(PHINode &PN) { visitPHINodeOrSelectInst(PN); }
1062
visitSelectInst(SelectInst & SI)1063 void visitSelectInst(SelectInst &SI) { visitPHINodeOrSelectInst(SI); }
1064
1065 /// Disable SROA entirely if there are unhandled users of the alloca.
visitInstruction(Instruction & I)1066 void visitInstruction(Instruction &I) { PI.setAborted(&I); }
1067 };
1068
AllocaSlices(const DataLayout & DL,AllocaInst & AI)1069 AllocaSlices::AllocaSlices(const DataLayout &DL, AllocaInst &AI)
1070 :
1071 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1072 AI(AI),
1073 #endif
1074 PointerEscapingInstr(nullptr) {
1075 SliceBuilder PB(DL, AI, *this);
1076 SliceBuilder::PtrInfo PtrI = PB.visitPtr(AI);
1077 if (PtrI.isEscaped() || PtrI.isAborted()) {
1078 // FIXME: We should sink the escape vs. abort info into the caller nicely,
1079 // possibly by just storing the PtrInfo in the AllocaSlices.
1080 PointerEscapingInstr = PtrI.getEscapingInst() ? PtrI.getEscapingInst()
1081 : PtrI.getAbortingInst();
1082 assert(PointerEscapingInstr && "Did not track a bad instruction");
1083 return;
1084 }
1085
1086 llvm::erase_if(Slices, [](const Slice &S) { return S.isDead(); });
1087
1088 // Sort the uses. This arranges for the offsets to be in ascending order,
1089 // and the sizes to be in descending order.
1090 llvm::stable_sort(Slices);
1091 }
1092
1093 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1094
print(raw_ostream & OS,const_iterator I,StringRef Indent) const1095 void AllocaSlices::print(raw_ostream &OS, const_iterator I,
1096 StringRef Indent) const {
1097 printSlice(OS, I, Indent);
1098 OS << "\n";
1099 printUse(OS, I, Indent);
1100 }
1101
printSlice(raw_ostream & OS,const_iterator I,StringRef Indent) const1102 void AllocaSlices::printSlice(raw_ostream &OS, const_iterator I,
1103 StringRef Indent) const {
1104 OS << Indent << "[" << I->beginOffset() << "," << I->endOffset() << ")"
1105 << " slice #" << (I - begin())
1106 << (I->isSplittable() ? " (splittable)" : "");
1107 }
1108
printUse(raw_ostream & OS,const_iterator I,StringRef Indent) const1109 void AllocaSlices::printUse(raw_ostream &OS, const_iterator I,
1110 StringRef Indent) const {
1111 OS << Indent << " used by: " << *I->getUse()->getUser() << "\n";
1112 }
1113
print(raw_ostream & OS) const1114 void AllocaSlices::print(raw_ostream &OS) const {
1115 if (PointerEscapingInstr) {
1116 OS << "Can't analyze slices for alloca: " << AI << "\n"
1117 << " A pointer to this alloca escaped by:\n"
1118 << " " << *PointerEscapingInstr << "\n";
1119 return;
1120 }
1121
1122 OS << "Slices of alloca: " << AI << "\n";
1123 for (const_iterator I = begin(), E = end(); I != E; ++I)
1124 print(OS, I);
1125 }
1126
dump(const_iterator I) const1127 LLVM_DUMP_METHOD void AllocaSlices::dump(const_iterator I) const {
1128 print(dbgs(), I);
1129 }
dump() const1130 LLVM_DUMP_METHOD void AllocaSlices::dump() const { print(dbgs()); }
1131
1132 #endif // !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1133
1134 /// Walk the range of a partitioning looking for a common type to cover this
1135 /// sequence of slices.
1136 static std::pair<Type *, IntegerType *>
findCommonType(AllocaSlices::const_iterator B,AllocaSlices::const_iterator E,uint64_t EndOffset)1137 findCommonType(AllocaSlices::const_iterator B, AllocaSlices::const_iterator E,
1138 uint64_t EndOffset) {
1139 Type *Ty = nullptr;
1140 bool TyIsCommon = true;
1141 IntegerType *ITy = nullptr;
1142
1143 // Note that we need to look at *every* alloca slice's Use to ensure we
1144 // always get consistent results regardless of the order of slices.
1145 for (AllocaSlices::const_iterator I = B; I != E; ++I) {
1146 Use *U = I->getUse();
1147 if (isa<IntrinsicInst>(*U->getUser()))
1148 continue;
1149 if (I->beginOffset() != B->beginOffset() || I->endOffset() != EndOffset)
1150 continue;
1151
1152 Type *UserTy = nullptr;
1153 if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
1154 UserTy = LI->getType();
1155 } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
1156 UserTy = SI->getValueOperand()->getType();
1157 }
1158
1159 if (IntegerType *UserITy = dyn_cast_or_null<IntegerType>(UserTy)) {
1160 // If the type is larger than the partition, skip it. We only encounter
1161 // this for split integer operations where we want to use the type of the
1162 // entity causing the split. Also skip if the type is not a byte width
1163 // multiple.
1164 if (UserITy->getBitWidth() % 8 != 0 ||
1165 UserITy->getBitWidth() / 8 > (EndOffset - B->beginOffset()))
1166 continue;
1167
1168 // Track the largest bitwidth integer type used in this way in case there
1169 // is no common type.
1170 if (!ITy || ITy->getBitWidth() < UserITy->getBitWidth())
1171 ITy = UserITy;
1172 }
1173
1174 // To avoid depending on the order of slices, Ty and TyIsCommon must not
1175 // depend on types skipped above.
1176 if (!UserTy || (Ty && Ty != UserTy))
1177 TyIsCommon = false; // Give up on anything but an iN type.
1178 else
1179 Ty = UserTy;
1180 }
1181
1182 return {TyIsCommon ? Ty : nullptr, ITy};
1183 }
1184
1185 /// PHI instructions that use an alloca and are subsequently loaded can be
1186 /// rewritten to load both input pointers in the pred blocks and then PHI the
1187 /// results, allowing the load of the alloca to be promoted.
1188 /// From this:
1189 /// %P2 = phi [i32* %Alloca, i32* %Other]
1190 /// %V = load i32* %P2
1191 /// to:
1192 /// %V1 = load i32* %Alloca -> will be mem2reg'd
1193 /// ...
1194 /// %V2 = load i32* %Other
1195 /// ...
1196 /// %V = phi [i32 %V1, i32 %V2]
1197 ///
1198 /// We can do this to a select if its only uses are loads and if the operands
1199 /// to the select can be loaded unconditionally.
1200 ///
1201 /// FIXME: This should be hoisted into a generic utility, likely in
1202 /// Transforms/Util/Local.h
isSafePHIToSpeculate(PHINode & PN)1203 static bool isSafePHIToSpeculate(PHINode &PN) {
1204 const DataLayout &DL = PN.getModule()->getDataLayout();
1205
1206 // For now, we can only do this promotion if the load is in the same block
1207 // as the PHI, and if there are no stores between the phi and load.
1208 // TODO: Allow recursive phi users.
1209 // TODO: Allow stores.
1210 BasicBlock *BB = PN.getParent();
1211 Align MaxAlign;
1212 uint64_t APWidth = DL.getIndexTypeSizeInBits(PN.getType());
1213 Type *LoadType = nullptr;
1214 for (User *U : PN.users()) {
1215 LoadInst *LI = dyn_cast<LoadInst>(U);
1216 if (!LI || !LI->isSimple())
1217 return false;
1218
1219 // For now we only allow loads in the same block as the PHI. This is
1220 // a common case that happens when instcombine merges two loads through
1221 // a PHI.
1222 if (LI->getParent() != BB)
1223 return false;
1224
1225 if (LoadType) {
1226 if (LoadType != LI->getType())
1227 return false;
1228 } else {
1229 LoadType = LI->getType();
1230 }
1231
1232 // Ensure that there are no instructions between the PHI and the load that
1233 // could store.
1234 for (BasicBlock::iterator BBI(PN); &*BBI != LI; ++BBI)
1235 if (BBI->mayWriteToMemory())
1236 return false;
1237
1238 MaxAlign = std::max(MaxAlign, LI->getAlign());
1239 }
1240
1241 if (!LoadType)
1242 return false;
1243
1244 APInt LoadSize = APInt(APWidth, DL.getTypeStoreSize(LoadType).getFixedSize());
1245
1246 // We can only transform this if it is safe to push the loads into the
1247 // predecessor blocks. The only thing to watch out for is that we can't put
1248 // a possibly trapping load in the predecessor if it is a critical edge.
1249 for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
1250 Instruction *TI = PN.getIncomingBlock(Idx)->getTerminator();
1251 Value *InVal = PN.getIncomingValue(Idx);
1252
1253 // If the value is produced by the terminator of the predecessor (an
1254 // invoke) or it has side-effects, there is no valid place to put a load
1255 // in the predecessor.
1256 if (TI == InVal || TI->mayHaveSideEffects())
1257 return false;
1258
1259 // If the predecessor has a single successor, then the edge isn't
1260 // critical.
1261 if (TI->getNumSuccessors() == 1)
1262 continue;
1263
1264 // If this pointer is always safe to load, or if we can prove that there
1265 // is already a load in the block, then we can move the load to the pred
1266 // block.
1267 if (isSafeToLoadUnconditionally(InVal, MaxAlign, LoadSize, DL, TI))
1268 continue;
1269
1270 return false;
1271 }
1272
1273 return true;
1274 }
1275
speculatePHINodeLoads(IRBuilderTy & IRB,PHINode & PN)1276 static void speculatePHINodeLoads(IRBuilderTy &IRB, PHINode &PN) {
1277 LLVM_DEBUG(dbgs() << " original: " << PN << "\n");
1278
1279 LoadInst *SomeLoad = cast<LoadInst>(PN.user_back());
1280 Type *LoadTy = SomeLoad->getType();
1281 IRB.SetInsertPoint(&PN);
1282 PHINode *NewPN = IRB.CreatePHI(LoadTy, PN.getNumIncomingValues(),
1283 PN.getName() + ".sroa.speculated");
1284
1285 // Get the AA tags and alignment to use from one of the loads. It does not
1286 // matter which one we get and if any differ.
1287 AAMDNodes AATags = SomeLoad->getAAMetadata();
1288 Align Alignment = SomeLoad->getAlign();
1289
1290 // Rewrite all loads of the PN to use the new PHI.
1291 while (!PN.use_empty()) {
1292 LoadInst *LI = cast<LoadInst>(PN.user_back());
1293 LI->replaceAllUsesWith(NewPN);
1294 LI->eraseFromParent();
1295 }
1296
1297 // Inject loads into all of the pred blocks.
1298 DenseMap<BasicBlock*, Value*> InjectedLoads;
1299 for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
1300 BasicBlock *Pred = PN.getIncomingBlock(Idx);
1301 Value *InVal = PN.getIncomingValue(Idx);
1302
1303 // A PHI node is allowed to have multiple (duplicated) entries for the same
1304 // basic block, as long as the value is the same. So if we already injected
1305 // a load in the predecessor, then we should reuse the same load for all
1306 // duplicated entries.
1307 if (Value* V = InjectedLoads.lookup(Pred)) {
1308 NewPN->addIncoming(V, Pred);
1309 continue;
1310 }
1311
1312 Instruction *TI = Pred->getTerminator();
1313 IRB.SetInsertPoint(TI);
1314
1315 LoadInst *Load = IRB.CreateAlignedLoad(
1316 LoadTy, InVal, Alignment,
1317 (PN.getName() + ".sroa.speculate.load." + Pred->getName()));
1318 ++NumLoadsSpeculated;
1319 if (AATags)
1320 Load->setAAMetadata(AATags);
1321 NewPN->addIncoming(Load, Pred);
1322 InjectedLoads[Pred] = Load;
1323 }
1324
1325 LLVM_DEBUG(dbgs() << " speculated to: " << *NewPN << "\n");
1326 PN.eraseFromParent();
1327 }
1328
1329 /// Select instructions that use an alloca and are subsequently loaded can be
1330 /// rewritten to load both input pointers and then select between the result,
1331 /// allowing the load of the alloca to be promoted.
1332 /// From this:
1333 /// %P2 = select i1 %cond, i32* %Alloca, i32* %Other
1334 /// %V = load i32* %P2
1335 /// to:
1336 /// %V1 = load i32* %Alloca -> will be mem2reg'd
1337 /// %V2 = load i32* %Other
1338 /// %V = select i1 %cond, i32 %V1, i32 %V2
1339 ///
1340 /// We can do this to a select if its only uses are loads and if the operand
1341 /// to the select can be loaded unconditionally. If found an intervening bitcast
1342 /// with a single use of the load, allow the promotion.
isSafeSelectToSpeculate(SelectInst & SI)1343 static bool isSafeSelectToSpeculate(SelectInst &SI) {
1344 Value *TValue = SI.getTrueValue();
1345 Value *FValue = SI.getFalseValue();
1346 const DataLayout &DL = SI.getModule()->getDataLayout();
1347
1348 for (User *U : SI.users()) {
1349 LoadInst *LI;
1350 BitCastInst *BC = dyn_cast<BitCastInst>(U);
1351 if (BC && BC->hasOneUse())
1352 LI = dyn_cast<LoadInst>(*BC->user_begin());
1353 else
1354 LI = dyn_cast<LoadInst>(U);
1355
1356 if (!LI || !LI->isSimple())
1357 return false;
1358
1359 // Both operands to the select need to be dereferenceable, either
1360 // absolutely (e.g. allocas) or at this point because we can see other
1361 // accesses to it.
1362 if (!isSafeToLoadUnconditionally(TValue, LI->getType(),
1363 LI->getAlign(), DL, LI))
1364 return false;
1365 if (!isSafeToLoadUnconditionally(FValue, LI->getType(),
1366 LI->getAlign(), DL, LI))
1367 return false;
1368 }
1369
1370 return true;
1371 }
1372
speculateSelectInstLoads(IRBuilderTy & IRB,SelectInst & SI)1373 static void speculateSelectInstLoads(IRBuilderTy &IRB, SelectInst &SI) {
1374 LLVM_DEBUG(dbgs() << " original: " << SI << "\n");
1375
1376 IRB.SetInsertPoint(&SI);
1377 Value *TV = SI.getTrueValue();
1378 Value *FV = SI.getFalseValue();
1379 // Replace the loads of the select with a select of two loads.
1380 while (!SI.use_empty()) {
1381 LoadInst *LI;
1382 BitCastInst *BC = dyn_cast<BitCastInst>(SI.user_back());
1383 if (BC) {
1384 assert(BC->hasOneUse() && "Bitcast should have a single use.");
1385 LI = cast<LoadInst>(BC->user_back());
1386 } else {
1387 LI = cast<LoadInst>(SI.user_back());
1388 }
1389
1390 assert(LI->isSimple() && "We only speculate simple loads");
1391
1392 IRB.SetInsertPoint(LI);
1393 Value *NewTV =
1394 BC ? IRB.CreateBitCast(TV, BC->getType(), TV->getName() + ".sroa.cast")
1395 : TV;
1396 Value *NewFV =
1397 BC ? IRB.CreateBitCast(FV, BC->getType(), FV->getName() + ".sroa.cast")
1398 : FV;
1399 LoadInst *TL = IRB.CreateLoad(LI->getType(), NewTV,
1400 LI->getName() + ".sroa.speculate.load.true");
1401 LoadInst *FL = IRB.CreateLoad(LI->getType(), NewFV,
1402 LI->getName() + ".sroa.speculate.load.false");
1403 NumLoadsSpeculated += 2;
1404
1405 // Transfer alignment and AA info if present.
1406 TL->setAlignment(LI->getAlign());
1407 FL->setAlignment(LI->getAlign());
1408
1409 AAMDNodes Tags = LI->getAAMetadata();
1410 if (Tags) {
1411 TL->setAAMetadata(Tags);
1412 FL->setAAMetadata(Tags);
1413 }
1414
1415 Value *V = IRB.CreateSelect(SI.getCondition(), TL, FL,
1416 LI->getName() + ".sroa.speculated");
1417
1418 LLVM_DEBUG(dbgs() << " speculated to: " << *V << "\n");
1419 LI->replaceAllUsesWith(V);
1420 LI->eraseFromParent();
1421 if (BC)
1422 BC->eraseFromParent();
1423 }
1424 SI.eraseFromParent();
1425 }
1426
1427 /// Build a GEP out of a base pointer and indices.
1428 ///
1429 /// This will return the BasePtr if that is valid, or build a new GEP
1430 /// instruction using the IRBuilder if GEP-ing is needed.
buildGEP(IRBuilderTy & IRB,Value * BasePtr,SmallVectorImpl<Value * > & Indices,const Twine & NamePrefix)1431 static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr,
1432 SmallVectorImpl<Value *> &Indices,
1433 const Twine &NamePrefix) {
1434 if (Indices.empty())
1435 return BasePtr;
1436
1437 // A single zero index is a no-op, so check for this and avoid building a GEP
1438 // in that case.
1439 if (Indices.size() == 1 && cast<ConstantInt>(Indices.back())->isZero())
1440 return BasePtr;
1441
1442 // buildGEP() is only called for non-opaque pointers.
1443 return IRB.CreateInBoundsGEP(
1444 BasePtr->getType()->getNonOpaquePointerElementType(), BasePtr, Indices,
1445 NamePrefix + "sroa_idx");
1446 }
1447
1448 /// Get a natural GEP off of the BasePtr walking through Ty toward
1449 /// TargetTy without changing the offset of the pointer.
1450 ///
1451 /// This routine assumes we've already established a properly offset GEP with
1452 /// Indices, and arrived at the Ty type. The goal is to continue to GEP with
1453 /// zero-indices down through type layers until we find one the same as
1454 /// TargetTy. If we can't find one with the same type, we at least try to use
1455 /// one with the same size. If none of that works, we just produce the GEP as
1456 /// indicated by Indices to have the correct offset.
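///
/// For example (a sketch with hypothetical types): starting from a pointer to
/// { [2 x float], i32 } with TargetTy == float, this keeps appending zero
/// indices, descending first into the struct's first field and then into the
/// array, until it reaches a float element.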
1457 static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL,
1458 Value *BasePtr, Type *Ty, Type *TargetTy,
1459 SmallVectorImpl<Value *> &Indices,
1460 const Twine &NamePrefix) {
1461 if (Ty == TargetTy)
1462 return buildGEP(IRB, BasePtr, Indices, NamePrefix);
1463
1464 // Offset size to use for the indices.
1465 unsigned OffsetSize = DL.getIndexTypeSizeInBits(BasePtr->getType());
1466
1467 // See if we can descend into a struct and locate a field with the correct
1468 // type.
1469 unsigned NumLayers = 0;
1470 Type *ElementTy = Ty;
1471 do {
1472 if (ElementTy->isPointerTy())
1473 break;
1474
1475 if (ArrayType *ArrayTy = dyn_cast<ArrayType>(ElementTy)) {
1476 ElementTy = ArrayTy->getElementType();
1477 Indices.push_back(IRB.getIntN(OffsetSize, 0));
1478 } else if (VectorType *VectorTy = dyn_cast<VectorType>(ElementTy)) {
1479 ElementTy = VectorTy->getElementType();
1480 Indices.push_back(IRB.getInt32(0));
1481 } else if (StructType *STy = dyn_cast<StructType>(ElementTy)) {
1482 if (STy->element_begin() == STy->element_end())
1483 break; // Nothing left to descend into.
1484 ElementTy = *STy->element_begin();
1485 Indices.push_back(IRB.getInt32(0));
1486 } else {
1487 break;
1488 }
1489 ++NumLayers;
1490 } while (ElementTy != TargetTy);
1491 if (ElementTy != TargetTy)
1492 Indices.erase(Indices.end() - NumLayers, Indices.end());
1493
1494 return buildGEP(IRB, BasePtr, Indices, NamePrefix);
1495 }
1496
1497 /// Get a natural GEP from a base pointer to a particular offset and
1498 /// resulting in a particular type.
1499 ///
1500 /// The goal is to produce a "natural" looking GEP that works with the existing
1501 /// composite types to arrive at the appropriate offset and element type for
1502 /// a pointer. TargetTy is the element type the returned GEP should point-to if
1503 /// possible. We recurse by decreasing Offset, adding the appropriate index to
1504 /// Indices, and setting Ty to the result subtype.
1505 ///
1506 /// If no natural GEP can be constructed, this function returns null.
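///
/// For example (hypothetical types): with Ptr of type {i32, i32}* and
/// Offset == 4, DL.getGEPIndicesForOffset yields the indices {0, 1} and
/// leaves Offset == 0, so the result is a natural GEP to the second field.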
1507 static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &DL,
1508 Value *Ptr, APInt Offset, Type *TargetTy,
1509 SmallVectorImpl<Value *> &Indices,
1510 const Twine &NamePrefix) {
1511 PointerType *Ty = cast<PointerType>(Ptr->getType());
1512
1513 // Don't consider any GEPs through an i8* as natural unless the TargetTy is
1514 // an i8.
1515 if (Ty == IRB.getInt8PtrTy(Ty->getAddressSpace()) && TargetTy->isIntegerTy(8))
1516 return nullptr;
1517
1518 Type *ElementTy = Ty->getNonOpaquePointerElementType();
1519 if (!ElementTy->isSized())
1520 return nullptr; // We can't GEP through an unsized element.
1521
1522 SmallVector<APInt> IntIndices = DL.getGEPIndicesForOffset(ElementTy, Offset);
1523 if (Offset != 0)
1524 return nullptr;
1525
1526 for (const APInt &Index : IntIndices)
1527 Indices.push_back(IRB.getInt(Index));
1528 return getNaturalGEPWithType(IRB, DL, Ptr, ElementTy, TargetTy, Indices,
1529 NamePrefix);
1530 }
1531
1532 /// Compute an adjusted pointer from Ptr by Offset bytes where the
1533 /// resulting pointer has PointerTy.
1534 ///
1535 /// This tries very hard to compute a "natural" GEP which arrives at the offset
1536 /// and produces the pointer type desired. Where it cannot, it will try to use
1537 /// the natural GEP to arrive at the offset and bitcast to the type. Where that
1538 /// fails, it will try to use an existing i8* and GEP to the byte offset and
1539 /// bitcast to the type.
1540 ///
1541 /// The strategy for finding the more natural GEPs is to peel off layers of the
1542 /// pointer, walking back through bit casts and GEPs, searching for a base
1543 /// pointer from which we can compute a natural GEP with the desired
1544 /// properties. The algorithm tries to fold as many constant indices into
1545 /// a single GEP as possible, thus making each GEP more independent of the
1546 /// surrounding code.
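///
/// A sketch of the i8* fallback path (names are illustrative): if no natural
/// GEP of the desired type can be found for, say, Offset == 4, the pointer is
/// rewritten roughly as
///   %raw  = bitcast %T* %ptr to i8*
///   %idx  = getelementptr inbounds i8, i8* %raw, i64 4
///   %cast = bitcast i8* %idx to <the desired pointer type>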
1547 static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr,
1548 APInt Offset, Type *PointerTy,
1549 const Twine &NamePrefix) {
1550 // Create i8 GEP for opaque pointers.
1551 if (Ptr->getType()->isOpaquePointerTy()) {
1552 if (Offset != 0)
1553 Ptr = IRB.CreateInBoundsGEP(IRB.getInt8Ty(), Ptr, IRB.getInt(Offset),
1554 NamePrefix + "sroa_idx");
1555 return IRB.CreatePointerBitCastOrAddrSpaceCast(Ptr, PointerTy,
1556 NamePrefix + "sroa_cast");
1557 }
1558
1559 // Even though we don't look through PHI nodes, we could be called on an
1560 // instruction in an unreachable block, which may be on a cycle.
1561 SmallPtrSet<Value *, 4> Visited;
1562 Visited.insert(Ptr);
1563 SmallVector<Value *, 4> Indices;
1564
1565 // We may end up computing an offset pointer that has the wrong type. If we
1566 // never are able to compute one directly that has the correct type, we'll
1567 // fall back to it, so keep it and the base it was computed from around here.
1568 Value *OffsetPtr = nullptr;
1569 Value *OffsetBasePtr;
1570
1571 // Remember any i8 pointer we come across to re-use if we need to do a raw
1572 // byte offset.
1573 Value *Int8Ptr = nullptr;
1574 APInt Int8PtrOffset(Offset.getBitWidth(), 0);
1575
1576 PointerType *TargetPtrTy = cast<PointerType>(PointerTy);
1577 Type *TargetTy = TargetPtrTy->getNonOpaquePointerElementType();
1578
1579 // Because an `addrspacecast` may be involved, `Ptr` (the storage pointer) may
1580 // have a different address space from the expected `PointerTy` (the pointer to
1581 // be used). Adjust the pointer type based on the original storage pointer.
1582 auto AS = cast<PointerType>(Ptr->getType())->getAddressSpace();
1583 PointerTy = TargetTy->getPointerTo(AS);
1584
1585 do {
1586 // First fold any existing GEPs into the offset.
1587 while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
1588 APInt GEPOffset(Offset.getBitWidth(), 0);
1589 if (!GEP->accumulateConstantOffset(DL, GEPOffset))
1590 break;
1591 Offset += GEPOffset;
1592 Ptr = GEP->getPointerOperand();
1593 if (!Visited.insert(Ptr).second)
1594 break;
1595 }
1596
1597 // See if we can perform a natural GEP here.
1598 Indices.clear();
1599 if (Value *P = getNaturalGEPWithOffset(IRB, DL, Ptr, Offset, TargetTy,
1600 Indices, NamePrefix)) {
1601 // If we have a new natural pointer at the offset, clear out any old
1602 // offset pointer we computed. Unless it is the base pointer or
1603 // a non-instruction, we built a GEP we don't need. Zap it.
1604 if (OffsetPtr && OffsetPtr != OffsetBasePtr)
1605 if (Instruction *I = dyn_cast<Instruction>(OffsetPtr)) {
1606 assert(I->use_empty() && "Built a GEP with uses somehow!");
1607 I->eraseFromParent();
1608 }
1609 OffsetPtr = P;
1610 OffsetBasePtr = Ptr;
1611 // If we also found a pointer of the right type, we're done.
1612 if (P->getType() == PointerTy)
1613 break;
1614 }
1615
1616 // Stash this pointer if we've found an i8*.
1617 if (Ptr->getType()->isIntegerTy(8)) {
1618 Int8Ptr = Ptr;
1619 Int8PtrOffset = Offset;
1620 }
1621
1622 // Peel off a layer of the pointer and update the offset appropriately.
1623 if (Operator::getOpcode(Ptr) == Instruction::BitCast) {
1624 Ptr = cast<Operator>(Ptr)->getOperand(0);
1625 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
1626 if (GA->isInterposable())
1627 break;
1628 Ptr = GA->getAliasee();
1629 } else {
1630 break;
1631 }
1632 assert(Ptr->getType()->isPointerTy() && "Unexpected operand type!");
1633 } while (Visited.insert(Ptr).second);
1634
1635 if (!OffsetPtr) {
1636 if (!Int8Ptr) {
1637 Int8Ptr = IRB.CreateBitCast(
1638 Ptr, IRB.getInt8PtrTy(PointerTy->getPointerAddressSpace()),
1639 NamePrefix + "sroa_raw_cast");
1640 Int8PtrOffset = Offset;
1641 }
1642
1643 OffsetPtr = Int8PtrOffset == 0
1644 ? Int8Ptr
1645 : IRB.CreateInBoundsGEP(IRB.getInt8Ty(), Int8Ptr,
1646 IRB.getInt(Int8PtrOffset),
1647 NamePrefix + "sroa_raw_idx");
1648 }
1649 Ptr = OffsetPtr;
1650
1651 // On the off chance we were targeting i8*, guard the bitcast here.
1652 if (cast<PointerType>(Ptr->getType()) != TargetPtrTy) {
1653 Ptr = IRB.CreatePointerBitCastOrAddrSpaceCast(Ptr,
1654 TargetPtrTy,
1655 NamePrefix + "sroa_cast");
1656 }
1657
1658 return Ptr;
1659 }
1660
1661 /// Compute the adjusted alignment for a load or store from an offset.
1662 static Align getAdjustedAlignment(Instruction *I, uint64_t Offset) {
1663 return commonAlignment(getLoadStoreAlignment(I), Offset);
1664 }
1665
1666 /// Test whether we can convert a value from the old to the new type.
1667 ///
1668 /// This predicate should be used to guard calls to convertValue in order to
1669 /// ensure that we only try to convert viable values. The strategy is that we
1670 /// will peel off single element struct and array wrappings to get to an
1671 /// underlying value, and convert that value.
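///
/// A few illustrative cases (assuming a typical 64-bit DataLayout): i64 and
/// double are convertible (equal size, both single-value types); i64 and an
/// address-space-0 pointer are convertible via ptrtoint/inttoptr; i64 and i32
/// are not convertible (mismatched integer widths); and a pointer in a
/// non-integral address space is never convertible to an integer.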
1672 static bool canConvertValue(const DataLayout &DL, Type *OldTy, Type *NewTy) {
1673 if (OldTy == NewTy)
1674 return true;
1675
1676 // For integer types, we can't handle any bit-width differences. This would
1677 // break both vector conversions with extension and introduce endianness
1678 // issues when in conjunction with loads and stores.
1679 if (isa<IntegerType>(OldTy) && isa<IntegerType>(NewTy)) {
1680 assert(cast<IntegerType>(OldTy)->getBitWidth() !=
1681 cast<IntegerType>(NewTy)->getBitWidth() &&
1682 "We can't have the same bitwidth for different int types");
1683 return false;
1684 }
1685
1686 if (DL.getTypeSizeInBits(NewTy).getFixedSize() !=
1687 DL.getTypeSizeInBits(OldTy).getFixedSize())
1688 return false;
1689 if (!NewTy->isSingleValueType() || !OldTy->isSingleValueType())
1690 return false;
1691
1692 // We can convert pointers to integers and vice-versa. Same for vectors
1693 // of pointers and integers.
1694 OldTy = OldTy->getScalarType();
1695 NewTy = NewTy->getScalarType();
1696 if (NewTy->isPointerTy() || OldTy->isPointerTy()) {
1697 if (NewTy->isPointerTy() && OldTy->isPointerTy()) {
1698 unsigned OldAS = OldTy->getPointerAddressSpace();
1699 unsigned NewAS = NewTy->getPointerAddressSpace();
1700 // Convert pointers if they are pointers from the same address space or
1701 // different integral (not non-integral) address spaces with the same
1702 // pointer size.
1703 return OldAS == NewAS ||
1704 (!DL.isNonIntegralAddressSpace(OldAS) &&
1705 !DL.isNonIntegralAddressSpace(NewAS) &&
1706 DL.getPointerSize(OldAS) == DL.getPointerSize(NewAS));
1707 }
1708
1709 // We can convert integers to integral pointers, but not to non-integral
1710 // pointers.
1711 if (OldTy->isIntegerTy())
1712 return !DL.isNonIntegralPointerType(NewTy);
1713
1714 // We can convert integral pointers to integers, but non-integral pointers
1715 // need to remain pointers.
1716 if (!DL.isNonIntegralPointerType(OldTy))
1717 return NewTy->isIntegerTy();
1718
1719 return false;
1720 }
1721
1722 return true;
1723 }
1724
1725 /// Generic routine to convert an SSA value to a value of a different
1726 /// type.
1727 ///
1728 /// This will try various different casting techniques, such as bitcasts,
1729 /// inttoptr, and ptrtoint casts. Use the \c canConvertValue predicate to test
1730 /// two types for viability with this routine.
1731 static Value *convertValue(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
1732 Type *NewTy) {
1733 Type *OldTy = V->getType();
1734 assert(canConvertValue(DL, OldTy, NewTy) && "Value not convertable to type");
1735
1736 if (OldTy == NewTy)
1737 return V;
1738
1739 assert(!(isa<IntegerType>(OldTy) && isa<IntegerType>(NewTy)) &&
1740 "Integer types must be the exact same to convert.");
1741
1742 // See if we need inttoptr for this type pair. May require additional bitcast.
1743 if (OldTy->isIntOrIntVectorTy() && NewTy->isPtrOrPtrVectorTy()) {
1744 // Expand <2 x i32> to i8* --> <2 x i32> to i64 to i8*
1745 // Expand i128 to <2 x i8*> --> i128 to <2 x i64> to <2 x i8*>
1746 // Expand <4 x i32> to <2 x i8*> --> <4 x i32> to <2 x i64> to <2 x i8*>
1747 // Directly handle i64 to i8*
1748 return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)),
1749 NewTy);
1750 }
1751
1752 // See if we need ptrtoint for this type pair. May require additional bitcast.
1753 if (OldTy->isPtrOrPtrVectorTy() && NewTy->isIntOrIntVectorTy()) {
1754 // Expand <2 x i8*> to i128 --> <2 x i8*> to <2 x i64> to i128
1755 // Expand i8* to <2 x i32> --> i8* to i64 to <2 x i32>
1756 // Expand <2 x i8*> to <4 x i32> --> <2 x i8*> to <2 x i64> to <4 x i32>
1757 // Expand i8* to i64 --> i8* to i64 to i64
1758 return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)),
1759 NewTy);
1760 }
1761
1762 if (OldTy->isPtrOrPtrVectorTy() && NewTy->isPtrOrPtrVectorTy()) {
1763 unsigned OldAS = OldTy->getPointerAddressSpace();
1764 unsigned NewAS = NewTy->getPointerAddressSpace();
1765 // To convert pointers with different address spaces (they have already been
1766 // checked to be convertible, i.e. they have the same pointer size), we
1767 // cannot use `bitcast` (which requires the same address space) or
1768 // `addrspacecast` (which is not always a no-op cast). Instead, use a pair
1769 // of no-op `ptrtoint`/`inttoptr` casts through an integer with the same bit
1770 // size.
1771 if (OldAS != NewAS) {
1772 assert(DL.getPointerSize(OldAS) == DL.getPointerSize(NewAS));
1773 return IRB.CreateIntToPtr(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)),
1774 NewTy);
1775 }
1776 }
1777
1778 return IRB.CreateBitCast(V, NewTy);
1779 }
1780
1781 /// Test whether the given slice use can be promoted to a vector.
1782 ///
1783 /// This function is called to test each entry in a partition which is slated
1784 /// for a single slice.
1785 static bool isVectorPromotionViableForSlice(Partition &P, const Slice &S,
1786 VectorType *Ty,
1787 uint64_t ElementSize,
1788 const DataLayout &DL) {
1789 // First validate the slice offsets.
1790 uint64_t BeginOffset =
1791 std::max(S.beginOffset(), P.beginOffset()) - P.beginOffset();
1792 uint64_t BeginIndex = BeginOffset / ElementSize;
1793 if (BeginIndex * ElementSize != BeginOffset ||
1794 BeginIndex >= cast<FixedVectorType>(Ty)->getNumElements())
1795 return false;
1796 uint64_t EndOffset =
1797 std::min(S.endOffset(), P.endOffset()) - P.beginOffset();
1798 uint64_t EndIndex = EndOffset / ElementSize;
1799 if (EndIndex * ElementSize != EndOffset ||
1800 EndIndex > cast<FixedVectorType>(Ty)->getNumElements())
1801 return false;
1802
1803 assert(EndIndex > BeginIndex && "Empty vector!");
1804 uint64_t NumElements = EndIndex - BeginIndex;
1805 Type *SliceTy = (NumElements == 1)
1806 ? Ty->getElementType()
1807 : FixedVectorType::get(Ty->getElementType(), NumElements);
1808
1809 Type *SplitIntTy =
1810 Type::getIntNTy(Ty->getContext(), NumElements * ElementSize * 8);
1811
1812 Use *U = S.getUse();
1813
1814 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) {
1815 if (MI->isVolatile())
1816 return false;
1817 if (!S.isSplittable())
1818 return false; // Skip any unsplittable intrinsics.
1819 } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) {
1820 if (!II->isLifetimeStartOrEnd() && !II->isDroppable())
1821 return false;
1822 } else if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
1823 if (LI->isVolatile())
1824 return false;
1825 Type *LTy = LI->getType();
1826 // Disable vector promotion when there are loads or stores of an FCA.
1827 if (LTy->isStructTy())
1828 return false;
1829 if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) {
1830 assert(LTy->isIntegerTy());
1831 LTy = SplitIntTy;
1832 }
1833 if (!canConvertValue(DL, SliceTy, LTy))
1834 return false;
1835 } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
1836 if (SI->isVolatile())
1837 return false;
1838 Type *STy = SI->getValueOperand()->getType();
1839 // Disable vector promotion when there are loads or stores of an FCA.
1840 if (STy->isStructTy())
1841 return false;
1842 if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) {
1843 assert(STy->isIntegerTy());
1844 STy = SplitIntTy;
1845 }
1846 if (!canConvertValue(DL, STy, SliceTy))
1847 return false;
1848 } else {
1849 return false;
1850 }
1851
1852 return true;
1853 }
1854
1855 /// Test whether the given alloca partitioning and range of slices can be
1856 /// promoted to a vector.
1857 ///
1858 /// This is a quick test to check whether we can rewrite a particular alloca
1859 /// partition (and its newly formed alloca) into a vector alloca with only
1860 /// whole-vector loads and stores such that it could be promoted to a vector
1861 /// SSA value. We only can ensure this for a limited set of operations, and we
1862 /// don't want to do the rewrites unless we are confident that the result will
1863 /// be promotable, so we have an early test here.
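///
/// As a sketch of the positive case: a partition over a <4 x float> alloca
/// whose slices are whole-vector loads and stores plus float-sized accesses
/// of individual elements passes this test, since every access maps onto the
/// whole vector or onto exactly one of its elements.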
1864 static VectorType *isVectorPromotionViable(Partition &P, const DataLayout &DL) {
1865 // Collect the candidate types for vector-based promotion. Also track whether
1866 // we have different element types.
1867 SmallVector<VectorType *, 4> CandidateTys;
1868 Type *CommonEltTy = nullptr;
1869 bool HaveCommonEltTy = true;
1870 auto CheckCandidateType = [&](Type *Ty) {
1871 if (auto *VTy = dyn_cast<VectorType>(Ty)) {
1872 // Drop all candidates if the total size in bits differs; such vectors cannot be bitcast.
1873 if (!CandidateTys.empty()) {
1874 VectorType *V = CandidateTys[0];
1875 if (DL.getTypeSizeInBits(VTy).getFixedSize() !=
1876 DL.getTypeSizeInBits(V).getFixedSize()) {
1877 CandidateTys.clear();
1878 return;
1879 }
1880 }
1881 CandidateTys.push_back(VTy);
1882 if (!CommonEltTy)
1883 CommonEltTy = VTy->getElementType();
1884 else if (CommonEltTy != VTy->getElementType())
1885 HaveCommonEltTy = false;
1886 }
1887 };
1888 // Consider any loads or stores that are the exact size of the slice.
1889 for (const Slice &S : P)
1890 if (S.beginOffset() == P.beginOffset() &&
1891 S.endOffset() == P.endOffset()) {
1892 if (auto *LI = dyn_cast<LoadInst>(S.getUse()->getUser()))
1893 CheckCandidateType(LI->getType());
1894 else if (auto *SI = dyn_cast<StoreInst>(S.getUse()->getUser()))
1895 CheckCandidateType(SI->getValueOperand()->getType());
1896 }
1897
1898 // If we didn't find a vector type, nothing to do here.
1899 if (CandidateTys.empty())
1900 return nullptr;
1901
1902 // Remove non-integer vector types if we had multiple common element types.
1903 // FIXME: It'd be nice to replace them with integer vector types, but we can't
1904 // do that until all the backends are known to produce good code for all
1905 // integer vector types.
1906 if (!HaveCommonEltTy) {
1907 llvm::erase_if(CandidateTys, [](VectorType *VTy) {
1908 return !VTy->getElementType()->isIntegerTy();
1909 });
1910
1911 // If there were no integer vector types, give up.
1912 if (CandidateTys.empty())
1913 return nullptr;
1914
1915 // Rank the remaining candidate vector types. This is easy because we know
1916 // they're all integer vectors. We sort by ascending number of elements.
1917 auto RankVectorTypes = [&DL](VectorType *RHSTy, VectorType *LHSTy) {
1918 (void)DL;
1919 assert(DL.getTypeSizeInBits(RHSTy).getFixedSize() ==
1920 DL.getTypeSizeInBits(LHSTy).getFixedSize() &&
1921 "Cannot have vector types of different sizes!");
1922 assert(RHSTy->getElementType()->isIntegerTy() &&
1923 "All non-integer types eliminated!");
1924 assert(LHSTy->getElementType()->isIntegerTy() &&
1925 "All non-integer types eliminated!");
1926 return cast<FixedVectorType>(RHSTy)->getNumElements() <
1927 cast<FixedVectorType>(LHSTy)->getNumElements();
1928 };
1929 llvm::sort(CandidateTys, RankVectorTypes);
1930 CandidateTys.erase(
1931 std::unique(CandidateTys.begin(), CandidateTys.end(), RankVectorTypes),
1932 CandidateTys.end());
1933 } else {
1934 // The only way to have the same element type in every vector type is to
1935 // have the same vector type. Check that and remove all but one.
1936 #ifndef NDEBUG
1937 for (VectorType *VTy : CandidateTys) {
1938 assert(VTy->getElementType() == CommonEltTy &&
1939 "Unaccounted for element type!");
1940 assert(VTy == CandidateTys[0] &&
1941 "Different vector types with the same element type!");
1942 }
1943 #endif
1944 CandidateTys.resize(1);
1945 }
1946
1947 // Try each vector type, and return the one which works.
1948 auto CheckVectorTypeForPromotion = [&](VectorType *VTy) {
1949 uint64_t ElementSize =
1950 DL.getTypeSizeInBits(VTy->getElementType()).getFixedSize();
1951
1952 // While the definition of LLVM vectors is bitpacked, we don't support sizes
1953 // that aren't byte sized.
1954 if (ElementSize % 8)
1955 return false;
1956 assert((DL.getTypeSizeInBits(VTy).getFixedSize() % 8) == 0 &&
1957 "vector size not a multiple of element size?");
1958 ElementSize /= 8;
1959
1960 for (const Slice &S : P)
1961 if (!isVectorPromotionViableForSlice(P, S, VTy, ElementSize, DL))
1962 return false;
1963
1964 for (const Slice *S : P.splitSliceTails())
1965 if (!isVectorPromotionViableForSlice(P, *S, VTy, ElementSize, DL))
1966 return false;
1967
1968 return true;
1969 };
1970 for (VectorType *VTy : CandidateTys)
1971 if (CheckVectorTypeForPromotion(VTy))
1972 return VTy;
1973
1974 return nullptr;
1975 }
1976
1977 /// Test whether a slice of an alloca is valid for integer widening.
1978 ///
1979 /// This implements the necessary checking for the \c isIntegerWideningViable
1980 /// test below on a single slice of the alloca.
1981 static bool isIntegerWideningViableForSlice(const Slice &S,
1982 uint64_t AllocBeginOffset,
1983 Type *AllocaTy,
1984 const DataLayout &DL,
1985 bool &WholeAllocaOp) {
1986 uint64_t Size = DL.getTypeStoreSize(AllocaTy).getFixedSize();
1987
1988 uint64_t RelBegin = S.beginOffset() - AllocBeginOffset;
1989 uint64_t RelEnd = S.endOffset() - AllocBeginOffset;
1990
1991 Use *U = S.getUse();
1992
1993 // Lifetime intrinsics operate over the whole alloca, whose size is usually
1994 // larger than other load/store slices (RelEnd > Size). But lifetime intrinsics
1995 // are always promotable and should not impact the promotability of the other
1996 // slices in the partition.
1997 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) {
1998 if (II->isLifetimeStartOrEnd() || II->isDroppable())
1999 return true;
2000 }
2001
2002 // We can't reasonably handle cases where the load or store extends past
2003 // the end of the alloca's type and into its padding.
2004 if (RelEnd > Size)
2005 return false;
2006
2007 if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
2008 if (LI->isVolatile())
2009 return false;
2010 // We can't handle loads that extend past the allocated memory.
2011 if (DL.getTypeStoreSize(LI->getType()).getFixedSize() > Size)
2012 return false;
2013 // So far, AllocaSliceRewriter does not support widening split slice tails
2014 // in rewriteIntegerLoad.
2015 if (S.beginOffset() < AllocBeginOffset)
2016 return false;
2017 // Note that we don't count vector loads or stores as whole-alloca
2018 // operations which enable integer widening because we would prefer to use
2019 // vector widening instead.
2020 if (!isa<VectorType>(LI->getType()) && RelBegin == 0 && RelEnd == Size)
2021 WholeAllocaOp = true;
2022 if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType())) {
2023 if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy).getFixedSize())
2024 return false;
2025 } else if (RelBegin != 0 || RelEnd != Size ||
2026 !canConvertValue(DL, AllocaTy, LI->getType())) {
2027 // Non-integer loads need to be convertible from the alloca type so that
2028 // they are promotable.
2029 return false;
2030 }
2031 } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
2032 Type *ValueTy = SI->getValueOperand()->getType();
2033 if (SI->isVolatile())
2034 return false;
2035 // We can't handle stores that extend past the allocated memory.
2036 if (DL.getTypeStoreSize(ValueTy).getFixedSize() > Size)
2037 return false;
2038 // So far, AllocaSliceRewriter does not support widening split slice tails
2039 // in rewriteIntegerStore.
2040 if (S.beginOffset() < AllocBeginOffset)
2041 return false;
2042 // Note that we don't count vector loads or stores as whole-alloca
2043 // operations which enable integer widening because we would prefer to use
2044 // vector widening instead.
2045 if (!isa<VectorType>(ValueTy) && RelBegin == 0 && RelEnd == Size)
2046 WholeAllocaOp = true;
2047 if (IntegerType *ITy = dyn_cast<IntegerType>(ValueTy)) {
2048 if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy).getFixedSize())
2049 return false;
2050 } else if (RelBegin != 0 || RelEnd != Size ||
2051 !canConvertValue(DL, ValueTy, AllocaTy)) {
2052 // Non-integer stores need to be convertible to the alloca type so that
2053 // they are promotable.
2054 return false;
2055 }
2056 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) {
2057 if (MI->isVolatile() || !isa<Constant>(MI->getLength()))
2058 return false;
2059 if (!S.isSplittable())
2060 return false; // Skip any unsplittable intrinsics.
2061 } else {
2062 return false;
2063 }
2064
2065 return true;
2066 }
2067
2068 /// Test whether the given alloca partition's integer operations can be
2069 /// widened to promotable ones.
2070 ///
2071 /// This is a quick test to check whether we can rewrite the integer loads and
2072 /// stores to a particular alloca into wider loads and stores and be able to
2073 /// promote the resulting alloca.
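///
/// For example, an i64 alloca with at least one whole-alloca i64 load or
/// store plus partial i32 accesses at offsets 0 and 4 passes this test: the
/// partial accesses are later rewritten as shifted and masked operations on
/// the full i64 (see extractInteger/insertInteger), after which the alloca is
/// promotable.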
2074 static bool isIntegerWideningViable(Partition &P, Type *AllocaTy,
2075 const DataLayout &DL) {
2076 uint64_t SizeInBits = DL.getTypeSizeInBits(AllocaTy).getFixedSize();
2077 // Don't create integer types larger than the maximum bitwidth.
2078 if (SizeInBits > IntegerType::MAX_INT_BITS)
2079 return false;
2080
2081 // Don't try to handle allocas with bit-padding.
2082 if (SizeInBits != DL.getTypeStoreSizeInBits(AllocaTy).getFixedSize())
2083 return false;
2084
2085 // We need to ensure that an integer type with the appropriate bitwidth can
2086 // be converted to the alloca type, whatever that is. We don't want to force
2087 // the alloca itself to have an integer type if there is a more suitable one.
2088 Type *IntTy = Type::getIntNTy(AllocaTy->getContext(), SizeInBits);
2089 if (!canConvertValue(DL, AllocaTy, IntTy) ||
2090 !canConvertValue(DL, IntTy, AllocaTy))
2091 return false;
2092
2093 // While examining uses, we ensure that the alloca has a covering load or
2094 // store. We don't want to widen the integer operations only to fail to
2095 // promote due to some other unsplittable entry (which we may make splittable
2096 // later). However, if there are only splittable uses, go ahead and assume
2097 // that we cover the alloca.
2098 // FIXME: We shouldn't consider split slices that happen to start in the
2099 // partition here...
2100 bool WholeAllocaOp = P.empty() && DL.isLegalInteger(SizeInBits);
2101
2102 for (const Slice &S : P)
2103 if (!isIntegerWideningViableForSlice(S, P.beginOffset(), AllocaTy, DL,
2104 WholeAllocaOp))
2105 return false;
2106
2107 for (const Slice *S : P.splitSliceTails())
2108 if (!isIntegerWideningViableForSlice(*S, P.beginOffset(), AllocaTy, DL,
2109 WholeAllocaOp))
2110 return false;
2111
2112 return WholeAllocaOp;
2113 }
2114
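/// Extract an integer of type Ty starting at byte Offset of the wider
/// integer V.
///
/// A sketch of the little-endian case: extracting an i16 at Offset == 2 from
/// an i64 becomes an lshr by 16 bits followed by a trunc to i16; on
/// big-endian targets the shift amount is instead computed from the high end
/// of the value.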
2115 static Value *extractInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
2116 IntegerType *Ty, uint64_t Offset,
2117 const Twine &Name) {
2118 LLVM_DEBUG(dbgs() << " start: " << *V << "\n");
2119 IntegerType *IntTy = cast<IntegerType>(V->getType());
2120 assert(DL.getTypeStoreSize(Ty).getFixedSize() + Offset <=
2121 DL.getTypeStoreSize(IntTy).getFixedSize() &&
2122 "Element extends past full value");
2123 uint64_t ShAmt = 8 * Offset;
2124 if (DL.isBigEndian())
2125 ShAmt = 8 * (DL.getTypeStoreSize(IntTy).getFixedSize() -
2126 DL.getTypeStoreSize(Ty).getFixedSize() - Offset);
2127 if (ShAmt) {
2128 V = IRB.CreateLShr(V, ShAmt, Name + ".shift");
2129 LLVM_DEBUG(dbgs() << " shifted: " << *V << "\n");
2130 }
2131 assert(Ty->getBitWidth() <= IntTy->getBitWidth() &&
2132 "Cannot extract to a larger integer!");
2133 if (Ty != IntTy) {
2134 V = IRB.CreateTrunc(V, Ty, Name + ".trunc");
2135 LLVM_DEBUG(dbgs() << " trunced: " << *V << "\n");
2136 }
2137 return V;
2138 }
2139
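/// Insert the integer V into the wider integer Old at byte Offset.
///
/// A sketch of the little-endian case: inserting an i16 at Offset == 2 into
/// an i64 zero-extends the i16, shifts it left by 16 bits, masks those bytes
/// out of Old, and ORs the two values together.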
2140 static Value *insertInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *Old,
2141 Value *V, uint64_t Offset, const Twine &Name) {
2142 IntegerType *IntTy = cast<IntegerType>(Old->getType());
2143 IntegerType *Ty = cast<IntegerType>(V->getType());
2144 assert(Ty->getBitWidth() <= IntTy->getBitWidth() &&
2145 "Cannot insert a larger integer!");
2146 LLVM_DEBUG(dbgs() << " start: " << *V << "\n");
2147 if (Ty != IntTy) {
2148 V = IRB.CreateZExt(V, IntTy, Name + ".ext");
2149 LLVM_DEBUG(dbgs() << " extended: " << *V << "\n");
2150 }
2151 assert(DL.getTypeStoreSize(Ty).getFixedSize() + Offset <=
2152 DL.getTypeStoreSize(IntTy).getFixedSize() &&
2153 "Element store outside of alloca store");
2154 uint64_t ShAmt = 8 * Offset;
2155 if (DL.isBigEndian())
2156 ShAmt = 8 * (DL.getTypeStoreSize(IntTy).getFixedSize() -
2157 DL.getTypeStoreSize(Ty).getFixedSize() - Offset);
2158 if (ShAmt) {
2159 V = IRB.CreateShl(V, ShAmt, Name + ".shift");
2160 LLVM_DEBUG(dbgs() << " shifted: " << *V << "\n");
2161 }
2162
2163 if (ShAmt || Ty->getBitWidth() < IntTy->getBitWidth()) {
2164 APInt Mask = ~Ty->getMask().zext(IntTy->getBitWidth()).shl(ShAmt);
2165 Old = IRB.CreateAnd(Old, Mask, Name + ".mask");
2166 LLVM_DEBUG(dbgs() << " masked: " << *Old << "\n");
2167 V = IRB.CreateOr(Old, V, Name + ".insert");
2168 LLVM_DEBUG(dbgs() << " inserted: " << *V << "\n");
2169 }
2170 return V;
2171 }
2172
2173 static Value *extractVector(IRBuilderTy &IRB, Value *V, unsigned BeginIndex,
2174 unsigned EndIndex, const Twine &Name) {
2175 auto *VecTy = cast<FixedVectorType>(V->getType());
2176 unsigned NumElements = EndIndex - BeginIndex;
2177 assert(NumElements <= VecTy->getNumElements() && "Too many elements!");
2178
2179 if (NumElements == VecTy->getNumElements())
2180 return V;
2181
2182 if (NumElements == 1) {
2183 V = IRB.CreateExtractElement(V, IRB.getInt32(BeginIndex),
2184 Name + ".extract");
2185 LLVM_DEBUG(dbgs() << " extract: " << *V << "\n");
2186 return V;
2187 }
2188
2189 auto Mask = llvm::to_vector<8>(llvm::seq<int>(BeginIndex, EndIndex));
2190 V = IRB.CreateShuffleVector(V, Mask, Name + ".extract");
2191 LLVM_DEBUG(dbgs() << " shuffle: " << *V << "\n");
2192 return V;
2193 }
2194
2195 static Value *insertVector(IRBuilderTy &IRB, Value *Old, Value *V,
2196 unsigned BeginIndex, const Twine &Name) {
2197 VectorType *VecTy = cast<VectorType>(Old->getType());
2198 assert(VecTy && "Can only insert a vector into a vector");
2199
2200 VectorType *Ty = dyn_cast<VectorType>(V->getType());
2201 if (!Ty) {
2202 // Single element to insert.
2203 V = IRB.CreateInsertElement(Old, V, IRB.getInt32(BeginIndex),
2204 Name + ".insert");
2205 LLVM_DEBUG(dbgs() << " insert: " << *V << "\n");
2206 return V;
2207 }
2208
2209 assert(cast<FixedVectorType>(Ty)->getNumElements() <=
2210 cast<FixedVectorType>(VecTy)->getNumElements() &&
2211 "Too many elements!");
2212 if (cast<FixedVectorType>(Ty)->getNumElements() ==
2213 cast<FixedVectorType>(VecTy)->getNumElements()) {
2214 assert(V->getType() == VecTy && "Vector type mismatch");
2215 return V;
2216 }
2217 unsigned EndIndex = BeginIndex + cast<FixedVectorType>(Ty)->getNumElements();
2218
2219 // When inserting a smaller vector into the larger one that backs the store,
2220 // we first use a shufflevector to widen it with poison elements, and then
2221 // use a select with a constant lane mask to choose between the widened
2222 // incoming vector and the loaded vector.
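// For example (purely illustrative values): inserting a <2 x i32> at
// BeginIndex == 1 into a <4 x i32> first expands V with the shuffle mask
// <-1, 0, 1, -1>, and then selects with the lane mask
// <false, true, true, false>, taking lanes 1 and 2 from the widened V and
// the remaining lanes from Old.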
2223 SmallVector<int, 8> Mask;
2224 Mask.reserve(cast<FixedVectorType>(VecTy)->getNumElements());
2225 for (unsigned i = 0; i != cast<FixedVectorType>(VecTy)->getNumElements(); ++i)
2226 if (i >= BeginIndex && i < EndIndex)
2227 Mask.push_back(i - BeginIndex);
2228 else
2229 Mask.push_back(-1);
2230 V = IRB.CreateShuffleVector(V, Mask, Name + ".expand");
2231 LLVM_DEBUG(dbgs() << " shuffle: " << *V << "\n");
2232
2233 SmallVector<Constant *, 8> Mask2;
2234 Mask2.reserve(cast<FixedVectorType>(VecTy)->getNumElements());
2235 for (unsigned i = 0; i != cast<FixedVectorType>(VecTy)->getNumElements(); ++i)
2236 Mask2.push_back(IRB.getInt1(i >= BeginIndex && i < EndIndex));
2237
2238 V = IRB.CreateSelect(ConstantVector::get(Mask2), V, Old, Name + "blend");
2239
2240 LLVM_DEBUG(dbgs() << " blend: " << *V << "\n");
2241 return V;
2242 }
2243
2244 /// Visitor to rewrite instructions that use a particular slice of an alloca
2245 /// to use a new alloca.
2246 ///
2247 /// Also implements the rewriting to vector-based accesses when the partition
2248 /// passes the isVectorPromotionViable predicate. Most of the rewriting logic
2249 /// lives here.
2250 class llvm::sroa::AllocaSliceRewriter
2251 : public InstVisitor<AllocaSliceRewriter, bool> {
2252 // Befriend the base class so it can delegate to private visit methods.
2253 friend class InstVisitor<AllocaSliceRewriter, bool>;
2254
2255 using Base = InstVisitor<AllocaSliceRewriter, bool>;
2256
2257 const DataLayout &DL;
2258 AllocaSlices &AS;
2259 SROAPass &Pass;
2260 AllocaInst &OldAI, &NewAI;
2261 const uint64_t NewAllocaBeginOffset, NewAllocaEndOffset;
2262 Type *NewAllocaTy;
2263
2264 // This is a convenience and flag variable that will be null unless the new
2265 // alloca's integer operations should be widened to this integer type due to
2266 // passing isIntegerWideningViable above. If it is non-null, the desired
2267 // integer type will be stored here for easy access during rewriting.
2268 IntegerType *IntTy;
2269
2270 // If we are rewriting an alloca partition which can be written as pure
2271 // vector operations, we stash extra information here. When VecTy is
2272 // non-null, we have some strict guarantees about the rewritten alloca:
2273 // - The new alloca is exactly the size of the vector type here.
2274 // - The accesses all either map to the entire vector or to a single
2275 // element.
2276 // - The set of accessing instructions is only one of those handled above
2277 // in isVectorPromotionViable. Generally these are the same access kinds
2278 // which are promotable via mem2reg.
2279 VectorType *VecTy;
2280 Type *ElementTy;
2281 uint64_t ElementSize;
2282
2283 // The original offset of the slice currently being rewritten relative to
2284 // the original alloca.
2285 uint64_t BeginOffset = 0;
2286 uint64_t EndOffset = 0;
2287
2288 // The new offsets of the slice currently being rewritten relative to the
2289 // original alloca.
2290 uint64_t NewBeginOffset = 0, NewEndOffset = 0;
2291
2292 uint64_t SliceSize = 0;
2293 bool IsSplittable = false;
2294 bool IsSplit = false;
2295 Use *OldUse = nullptr;
2296 Instruction *OldPtr = nullptr;
2297
2298 // Track post-rewrite users which are PHI nodes and Selects.
2299 SmallSetVector<PHINode *, 8> &PHIUsers;
2300 SmallSetVector<SelectInst *, 8> &SelectUsers;
2301
2302 // Utility IR builder whose name prefix is set up for each visited use, and
2303 // whose insertion point is set to point to the user.
2304 IRBuilderTy IRB;
2305
2306 public:
2307 AllocaSliceRewriter(const DataLayout &DL, AllocaSlices &AS, SROAPass &Pass,
2308 AllocaInst &OldAI, AllocaInst &NewAI,
2309 uint64_t NewAllocaBeginOffset,
2310 uint64_t NewAllocaEndOffset, bool IsIntegerPromotable,
2311 VectorType *PromotableVecTy,
2312 SmallSetVector<PHINode *, 8> &PHIUsers,
2313 SmallSetVector<SelectInst *, 8> &SelectUsers)
2314 : DL(DL), AS(AS), Pass(Pass), OldAI(OldAI), NewAI(NewAI),
2315 NewAllocaBeginOffset(NewAllocaBeginOffset),
2316 NewAllocaEndOffset(NewAllocaEndOffset),
2317 NewAllocaTy(NewAI.getAllocatedType()),
2318 IntTy(
2319 IsIntegerPromotable
2320 ? Type::getIntNTy(NewAI.getContext(),
2321 DL.getTypeSizeInBits(NewAI.getAllocatedType())
2322 .getFixedSize())
2323 : nullptr),
2324 VecTy(PromotableVecTy),
2325 ElementTy(VecTy ? VecTy->getElementType() : nullptr),
2326 ElementSize(VecTy ? DL.getTypeSizeInBits(ElementTy).getFixedSize() / 8
2327 : 0),
2328 PHIUsers(PHIUsers), SelectUsers(SelectUsers),
2329 IRB(NewAI.getContext(), ConstantFolder()) {
2330 if (VecTy) {
2331 assert((DL.getTypeSizeInBits(ElementTy).getFixedSize() % 8) == 0 &&
2332 "Only multiple-of-8 sized vector elements are viable");
2333 ++NumVectorized;
2334 }
2335 assert((!IntTy && !VecTy) || (IntTy && !VecTy) || (!IntTy && VecTy));
2336 }
2337
2338 bool visit(AllocaSlices::const_iterator I) {
2339 bool CanSROA = true;
2340 BeginOffset = I->beginOffset();
2341 EndOffset = I->endOffset();
2342 IsSplittable = I->isSplittable();
2343 IsSplit =
2344 BeginOffset < NewAllocaBeginOffset || EndOffset > NewAllocaEndOffset;
2345 LLVM_DEBUG(dbgs() << " rewriting " << (IsSplit ? "split " : ""));
2346 LLVM_DEBUG(AS.printSlice(dbgs(), I, ""));
2347 LLVM_DEBUG(dbgs() << "\n");
2348
2349 // Compute the intersecting offset range.
2350 assert(BeginOffset < NewAllocaEndOffset);
2351 assert(EndOffset > NewAllocaBeginOffset);
2352 NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
2353 NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
2354
2355 SliceSize = NewEndOffset - NewBeginOffset;
2356
2357 OldUse = I->getUse();
2358 OldPtr = cast<Instruction>(OldUse->get());
2359
2360 Instruction *OldUserI = cast<Instruction>(OldUse->getUser());
2361 IRB.SetInsertPoint(OldUserI);
2362 IRB.SetCurrentDebugLocation(OldUserI->getDebugLoc());
2363 IRB.getInserter().SetNamePrefix(
2364 Twine(NewAI.getName()) + "." + Twine(BeginOffset) + ".");
2365
2366 CanSROA &= visit(cast<Instruction>(OldUse->getUser()));
2367 if (VecTy || IntTy)
2368 assert(CanSROA);
2369 return CanSROA;
2370 }
2371
2372 private:
2373 // Make sure the other visit overloads are visible.
2374 using Base::visit;
2375
2376 // Every instruction which can end up as a user must have a rewrite rule.
2377 bool visitInstruction(Instruction &I) {
2378 LLVM_DEBUG(dbgs() << " !!!! Cannot rewrite: " << I << "\n");
2379 llvm_unreachable("No rewrite rule for this instruction!");
2380 }
2381
2382 Value *getNewAllocaSlicePtr(IRBuilderTy &IRB, Type *PointerTy) {
2383 // Note that the offset computation can use BeginOffset or NewBeginOffset
2384 // interchangeably for unsplit slices.
2385 assert(IsSplit || BeginOffset == NewBeginOffset);
2386 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
2387
2388 #ifndef NDEBUG
2389 StringRef OldName = OldPtr->getName();
2390 // Skip through the last '.sroa.' component of the name.
2391 size_t LastSROAPrefix = OldName.rfind(".sroa.");
2392 if (LastSROAPrefix != StringRef::npos) {
2393 OldName = OldName.substr(LastSROAPrefix + strlen(".sroa."));
2394 // Look for an SROA slice index.
2395 size_t IndexEnd = OldName.find_first_not_of("0123456789");
2396 if (IndexEnd != StringRef::npos && OldName[IndexEnd] == '.') {
2397 // Strip the index and look for the offset.
2398 OldName = OldName.substr(IndexEnd + 1);
2399 size_t OffsetEnd = OldName.find_first_not_of("0123456789");
2400 if (OffsetEnd != StringRef::npos && OldName[OffsetEnd] == '.')
2401 // Strip the offset.
2402 OldName = OldName.substr(OffsetEnd + 1);
2403 }
2404 }
2405 // Strip any SROA suffixes as well.
2406 OldName = OldName.substr(0, OldName.find(".sroa_"));
2407 #endif
2408
2409 return getAdjustedPtr(IRB, DL, &NewAI,
2410 APInt(DL.getIndexTypeSizeInBits(PointerTy), Offset),
2411 PointerTy,
2412 #ifndef NDEBUG
2413 Twine(OldName) + "."
2414 #else
2415 Twine()
2416 #endif
2417 );
2418 }
2419
2420 /// Compute a suitable alignment for accessing this slice of the *new*
2421 /// alloca, derived from the new alloca's alignment and the slice's offset
2422 /// within it.
2425 Align getSliceAlign() {
2426 return commonAlignment(NewAI.getAlign(),
2427 NewBeginOffset - NewAllocaBeginOffset);
2428 }
2429
2430 unsigned getIndex(uint64_t Offset) {
2431 assert(VecTy && "Can only call getIndex when rewriting a vector");
2432 uint64_t RelOffset = Offset - NewAllocaBeginOffset;
2433 assert(RelOffset / ElementSize < UINT32_MAX && "Index out of bounds");
2434 uint32_t Index = RelOffset / ElementSize;
2435 assert(Index * ElementSize == RelOffset);
2436 return Index;
2437 }
2438
2439 void deleteIfTriviallyDead(Value *V) {
2440 Instruction *I = cast<Instruction>(V);
2441 if (isInstructionTriviallyDead(I))
2442 Pass.DeadInsts.push_back(I);
2443 }
2444
2445 Value *rewriteVectorizedLoadInst(LoadInst &LI) {
2446 unsigned BeginIndex = getIndex(NewBeginOffset);
2447 unsigned EndIndex = getIndex(NewEndOffset);
2448 assert(EndIndex > BeginIndex && "Empty vector!");
2449
2450 LoadInst *Load = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
2451 NewAI.getAlign(), "load");
2452
2453 Load->copyMetadata(LI, {LLVMContext::MD_mem_parallel_loop_access,
2454 LLVMContext::MD_access_group});
2455 return extractVector(IRB, Load, BeginIndex, EndIndex, "vec");
2456 }
2457
2458 Value *rewriteIntegerLoad(LoadInst &LI) {
2459 assert(IntTy && "We cannot insert an integer to the alloca");
2460 assert(!LI.isVolatile());
2461 Value *V = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
2462 NewAI.getAlign(), "load");
2463 V = convertValue(DL, IRB, V, IntTy);
2464 assert(NewBeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
2465 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
2466 if (Offset > 0 || NewEndOffset < NewAllocaEndOffset) {
2467 IntegerType *ExtractTy = Type::getIntNTy(LI.getContext(), SliceSize * 8);
2468 V = extractInteger(DL, IRB, V, ExtractTy, Offset, "extract");
2469 }
2470 // It is possible that the extracted type is not the load type. This
2471 // happens if there is a load past the end of the alloca, and as
2472 // a consequence the slice is narrower but still a candidate for integer
2473 // lowering. To handle this case, we just zero extend the extracted
2474 // integer.
2475 assert(cast<IntegerType>(LI.getType())->getBitWidth() >= SliceSize * 8 &&
2476 "Can only handle an extract for an overly wide load");
2477 if (cast<IntegerType>(LI.getType())->getBitWidth() > SliceSize * 8)
2478 V = IRB.CreateZExt(V, LI.getType());
2479 return V;
2480 }
2481
2482 bool visitLoadInst(LoadInst &LI) {
2483 LLVM_DEBUG(dbgs() << " original: " << LI << "\n");
2484 Value *OldOp = LI.getOperand(0);
2485 assert(OldOp == OldPtr);
2486
2487 AAMDNodes AATags = LI.getAAMetadata();
2488
2489 unsigned AS = LI.getPointerAddressSpace();
2490
2491 Type *TargetTy = IsSplit ? Type::getIntNTy(LI.getContext(), SliceSize * 8)
2492 : LI.getType();
2493 const bool IsLoadPastEnd =
2494 DL.getTypeStoreSize(TargetTy).getFixedSize() > SliceSize;
2495 bool IsPtrAdjusted = false;
2496 Value *V;
2497 if (VecTy) {
2498 V = rewriteVectorizedLoadInst(LI);
2499 } else if (IntTy && LI.getType()->isIntegerTy()) {
2500 V = rewriteIntegerLoad(LI);
2501 } else if (NewBeginOffset == NewAllocaBeginOffset &&
2502 NewEndOffset == NewAllocaEndOffset &&
2503 (canConvertValue(DL, NewAllocaTy, TargetTy) ||
2504 (IsLoadPastEnd && NewAllocaTy->isIntegerTy() &&
2505 TargetTy->isIntegerTy()))) {
2506 LoadInst *NewLI = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
2507 NewAI.getAlign(), LI.isVolatile(),
2508 LI.getName());
2509 if (AATags)
2510 NewLI->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset));
2511 if (LI.isVolatile())
2512 NewLI->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
2513 if (NewLI->isAtomic())
2514 NewLI->setAlignment(LI.getAlign());
2515
2516 // Any !nonnull metadata or !range metadata on the old load is also valid
2517 // on the new load. This is true in some cases even when the loads have
2518 // different types, for example by mapping !nonnull metadata to !range
2519 // metadata by modeling the null pointer constant converted to the integer
2520 // type.
2521 // FIXME: Add support for range metadata here. Currently the utilities
2522 // for this don't propagate range metadata in trivial cases from one
2523 // integer load to another, don't handle non-addrspace-0 null pointers
2524 // correctly, and don't have any support for mapping ranges as the
2525 // integer type becomes wider or narrower.
2526 if (MDNode *N = LI.getMetadata(LLVMContext::MD_nonnull))
2527 copyNonnullMetadata(LI, N, *NewLI);
2528
2529 // Try to preserve nonnull metadata
2530 V = NewLI;
2531
2532 // If this is an integer load past the end of the slice (which means the
2533 // bytes outside the slice are undef or this load is dead) just forcibly
2534 // fix the integer size with correct handling of endianness.
2535 if (auto *AITy = dyn_cast<IntegerType>(NewAllocaTy))
2536 if (auto *TITy = dyn_cast<IntegerType>(TargetTy))
2537 if (AITy->getBitWidth() < TITy->getBitWidth()) {
2538 V = IRB.CreateZExt(V, TITy, "load.ext");
2539 if (DL.isBigEndian())
2540 V = IRB.CreateShl(V, TITy->getBitWidth() - AITy->getBitWidth(),
2541 "endian_shift");
2542 }
2543 } else {
2544 Type *LTy = TargetTy->getPointerTo(AS);
2545 LoadInst *NewLI =
2546 IRB.CreateAlignedLoad(TargetTy, getNewAllocaSlicePtr(IRB, LTy),
2547 getSliceAlign(), LI.isVolatile(), LI.getName());
2548 if (AATags)
2549 NewLI->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset));
2550 if (LI.isVolatile())
2551 NewLI->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
2552 NewLI->copyMetadata(LI, {LLVMContext::MD_mem_parallel_loop_access,
2553 LLVMContext::MD_access_group});
2554
2555 V = NewLI;
2556 IsPtrAdjusted = true;
2557 }
2558 V = convertValue(DL, IRB, V, TargetTy);
2559
2560 if (IsSplit) {
2561 assert(!LI.isVolatile());
2562 assert(LI.getType()->isIntegerTy() &&
2563 "Only integer type loads and stores are split");
2564 assert(SliceSize < DL.getTypeStoreSize(LI.getType()).getFixedSize() &&
2565 "Split load isn't smaller than original load");
2566 assert(DL.typeSizeEqualsStoreSize(LI.getType()) &&
2567 "Non-byte-multiple bit width");
2568 // Move the insertion point just past the load so that we can refer to it.
2569 IRB.SetInsertPoint(&*std::next(BasicBlock::iterator(&LI)));
2570 // Create a placeholder value with the same type as LI to use as the
2571 // basis for the new value. This allows us to replace the uses of LI with
2572 // the computed value, and then replace the placeholder with LI, leaving
2573 // LI only used for this computation.
2574 Value *Placeholder = new LoadInst(
2575 LI.getType(), PoisonValue::get(LI.getType()->getPointerTo(AS)), "",
2576 false, Align(1));
2577 V = insertInteger(DL, IRB, Placeholder, V, NewBeginOffset - BeginOffset,
2578 "insert");
2579 LI.replaceAllUsesWith(V);
2580 Placeholder->replaceAllUsesWith(&LI);
2581 Placeholder->deleteValue();
2582 } else {
2583 LI.replaceAllUsesWith(V);
2584 }
2585
2586 Pass.DeadInsts.push_back(&LI);
2587 deleteIfTriviallyDead(OldOp);
2588 LLVM_DEBUG(dbgs() << " to: " << *V << "\n");
2589 return !LI.isVolatile() && !IsPtrAdjusted;
2590 }
2591
2592 bool rewriteVectorizedStoreInst(Value *V, StoreInst &SI, Value *OldOp,
2593 AAMDNodes AATags) {
2594 if (V->getType() != VecTy) {
2595 unsigned BeginIndex = getIndex(NewBeginOffset);
2596 unsigned EndIndex = getIndex(NewEndOffset);
2597 assert(EndIndex > BeginIndex && "Empty vector!");
2598 unsigned NumElements = EndIndex - BeginIndex;
2599 assert(NumElements <= cast<FixedVectorType>(VecTy)->getNumElements() &&
2600 "Too many elements!");
2601 Type *SliceTy = (NumElements == 1)
2602 ? ElementTy
2603 : FixedVectorType::get(ElementTy, NumElements);
2604 if (V->getType() != SliceTy)
2605 V = convertValue(DL, IRB, V, SliceTy);
2606
2607 // Mix in the existing elements.
2608 Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
2609 NewAI.getAlign(), "load");
2610 V = insertVector(IRB, Old, V, BeginIndex, "vec");
2611 }
2612 StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlign());
2613 Store->copyMetadata(SI, {LLVMContext::MD_mem_parallel_loop_access,
2614 LLVMContext::MD_access_group});
2615 if (AATags)
2616 Store->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset));
2617 Pass.DeadInsts.push_back(&SI);
2618
2619 LLVM_DEBUG(dbgs() << " to: " << *Store << "\n");
2620 return true;
2621 }
2622
2623 bool rewriteIntegerStore(Value *V, StoreInst &SI, AAMDNodes AATags) {
2624 assert(IntTy && "We cannot extract an integer from the alloca");
2625 assert(!SI.isVolatile());
2626 if (DL.getTypeSizeInBits(V->getType()).getFixedSize() !=
2627 IntTy->getBitWidth()) {
2628 Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
2629 NewAI.getAlign(), "oldload");
2630 Old = convertValue(DL, IRB, Old, IntTy);
2631 assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
2632 uint64_t Offset = BeginOffset - NewAllocaBeginOffset;
2633 V = insertInteger(DL, IRB, Old, SI.getValueOperand(), Offset, "insert");
2634 }
2635 V = convertValue(DL, IRB, V, NewAllocaTy);
2636 StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlign());
2637 Store->copyMetadata(SI, {LLVMContext::MD_mem_parallel_loop_access,
2638 LLVMContext::MD_access_group});
2639 if (AATags)
2640 Store->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset));
2641 Pass.DeadInsts.push_back(&SI);
2642 LLVM_DEBUG(dbgs() << " to: " << *Store << "\n");
2643 return true;
2644 }
2645
2646 bool visitStoreInst(StoreInst &SI) {
2647 LLVM_DEBUG(dbgs() << " original: " << SI << "\n");
2648 Value *OldOp = SI.getOperand(1);
2649 assert(OldOp == OldPtr);
2650
2651 AAMDNodes AATags = SI.getAAMetadata();
2652 Value *V = SI.getValueOperand();
2653
2654 // Strip all inbounds GEPs and pointer casts to try to dig out any root
2655 // alloca that should be re-examined after promoting this alloca.
2656 if (V->getType()->isPointerTy())
2657 if (AllocaInst *AI = dyn_cast<AllocaInst>(V->stripInBoundsOffsets()))
2658 Pass.PostPromotionWorklist.insert(AI);
2659
2660 if (SliceSize < DL.getTypeStoreSize(V->getType()).getFixedSize()) {
2661 assert(!SI.isVolatile());
2662 assert(V->getType()->isIntegerTy() &&
2663 "Only integer type loads and stores are split");
2664 assert(DL.typeSizeEqualsStoreSize(V->getType()) &&
2665 "Non-byte-multiple bit width");
2666 IntegerType *NarrowTy = Type::getIntNTy(SI.getContext(), SliceSize * 8);
2667 V = extractInteger(DL, IRB, V, NarrowTy, NewBeginOffset - BeginOffset,
2668 "extract");
2669 }
2670
2671 if (VecTy)
2672 return rewriteVectorizedStoreInst(V, SI, OldOp, AATags);
2673 if (IntTy && V->getType()->isIntegerTy())
2674 return rewriteIntegerStore(V, SI, AATags);
2675
2676 const bool IsStorePastEnd =
2677 DL.getTypeStoreSize(V->getType()).getFixedSize() > SliceSize;
2678 StoreInst *NewSI;
2679 if (NewBeginOffset == NewAllocaBeginOffset &&
2680 NewEndOffset == NewAllocaEndOffset &&
2681 (canConvertValue(DL, V->getType(), NewAllocaTy) ||
2682 (IsStorePastEnd && NewAllocaTy->isIntegerTy() &&
2683 V->getType()->isIntegerTy()))) {
2684 // If this is an integer store past the end of the slice (and thus the bytes
2685 // past that point are irrelevant or this is unreachable), truncate the
2686 // value prior to storing.
2687 if (auto *VITy = dyn_cast<IntegerType>(V->getType()))
2688 if (auto *AITy = dyn_cast<IntegerType>(NewAllocaTy))
2689 if (VITy->getBitWidth() > AITy->getBitWidth()) {
2690 if (DL.isBigEndian())
2691 V = IRB.CreateLShr(V, VITy->getBitWidth() - AITy->getBitWidth(),
2692 "endian_shift");
2693 V = IRB.CreateTrunc(V, AITy, "load.trunc");
2694 }
2695
2696 V = convertValue(DL, IRB, V, NewAllocaTy);
2697 NewSI =
2698 IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlign(), SI.isVolatile());
2699 } else {
2700 unsigned AS = SI.getPointerAddressSpace();
2701 Value *NewPtr = getNewAllocaSlicePtr(IRB, V->getType()->getPointerTo(AS));
2702 NewSI =
2703 IRB.CreateAlignedStore(V, NewPtr, getSliceAlign(), SI.isVolatile());
2704 }
2705 NewSI->copyMetadata(SI, {LLVMContext::MD_mem_parallel_loop_access,
2706 LLVMContext::MD_access_group});
2707 if (AATags)
2708 NewSI->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset));
2709 if (SI.isVolatile())
2710 NewSI->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
2711 if (NewSI->isAtomic())
2712 NewSI->setAlignment(SI.getAlign());
2713 Pass.DeadInsts.push_back(&SI);
2714 deleteIfTriviallyDead(OldOp);
2715
2716 LLVM_DEBUG(dbgs() << " to: " << *NewSI << "\n");
2717 return NewSI->getPointerOperand() == &NewAI &&
2718 NewSI->getValueOperand()->getType() == NewAllocaTy &&
2719 !SI.isVolatile();
2720 }
2721
2722 /// Compute an integer value from splatting an i8 across the given
2723 /// number of bytes.
2724 ///
2725 /// Note that this routine assumes an i8 is a byte. If that isn't true, don't
2726 /// call this routine.
2727 /// FIXME: Heed the advice above.
2728 ///
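/// A worked example of the multiply trick used below: splatting the byte
/// 0xAB across 4 bytes computes 0xAB * (0xFFFFFFFF / 0xFF)
/// = 0xAB * 0x01010101 = 0xABABABAB.
///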
2729 /// \param V The i8 value to splat.
2730 /// \param Size The number of bytes in the output (assuming i8 is one byte)
2731 Value *getIntegerSplat(Value *V, unsigned Size) {
2732 assert(Size > 0 && "Expected a positive number of bytes.");
2733 IntegerType *VTy = cast<IntegerType>(V->getType());
2734 assert(VTy->getBitWidth() == 8 && "Expected an i8 value for the byte");
2735 if (Size == 1)
2736 return V;
2737
2738 Type *SplatIntTy = Type::getIntNTy(VTy->getContext(), Size * 8);
2739 V = IRB.CreateMul(
2740 IRB.CreateZExt(V, SplatIntTy, "zext"),
2741 IRB.CreateUDiv(Constant::getAllOnesValue(SplatIntTy),
2742 IRB.CreateZExt(Constant::getAllOnesValue(V->getType()),
2743 SplatIntTy)),
2744 "isplat");
2745 return V;
2746 }
2747
2748 /// Compute a vector splat for a given element value.
2749 Value *getVectorSplat(Value *V, unsigned NumElements) {
2750 V = IRB.CreateVectorSplat(NumElements, V, "vsplat");
2751 LLVM_DEBUG(dbgs() << " splat: " << *V << "\n");
2752 return V;
2753 }
2754
2755 bool visitMemSetInst(MemSetInst &II) {
2756 LLVM_DEBUG(dbgs() << " original: " << II << "\n");
2757 assert(II.getRawDest() == OldPtr);
2758
2759 AAMDNodes AATags = II.getAAMetadata();
2760
2761 // If the memset has a variable size, it cannot be split, just adjust the
2762 // pointer to the new alloca.
2763 if (!isa<ConstantInt>(II.getLength())) {
2764 assert(!IsSplit);
2765 assert(NewBeginOffset == BeginOffset);
2766 II.setDest(getNewAllocaSlicePtr(IRB, OldPtr->getType()));
2767 II.setDestAlignment(getSliceAlign());
2768
2769 deleteIfTriviallyDead(OldPtr);
2770 return false;
2771 }
2772
2773 // Record this instruction for deletion.
2774 Pass.DeadInsts.push_back(&II);
2775
2776 Type *AllocaTy = NewAI.getAllocatedType();
2777 Type *ScalarTy = AllocaTy->getScalarType();
2778
2779 const bool CanContinue = [&]() {
2780 if (VecTy || IntTy)
2781 return true;
2782 if (BeginOffset > NewAllocaBeginOffset ||
2783 EndOffset < NewAllocaEndOffset)
2784 return false;
2785 // Length must be in range for FixedVectorType.
2786 auto *C = cast<ConstantInt>(II.getLength());
2787 const uint64_t Len = C->getLimitedValue();
2788 if (Len > std::numeric_limits<unsigned>::max())
2789 return false;
2790 auto *Int8Ty = IntegerType::getInt8Ty(NewAI.getContext());
2791 auto *SrcTy = FixedVectorType::get(Int8Ty, Len);
2792 return canConvertValue(DL, SrcTy, AllocaTy) &&
2793 DL.isLegalInteger(DL.getTypeSizeInBits(ScalarTy).getFixedSize());
2794 }();
2795
2796 // If this doesn't map cleanly onto the alloca type, and that type isn't
2797 // a single value type, just emit a memset.
2798 if (!CanContinue) {
2799 Type *SizeTy = II.getLength()->getType();
2800 Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset);
2801 CallInst *New = IRB.CreateMemSet(
2802 getNewAllocaSlicePtr(IRB, OldPtr->getType()), II.getValue(), Size,
2803 MaybeAlign(getSliceAlign()), II.isVolatile());
2804 if (AATags)
2805 New->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset));
2806 LLVM_DEBUG(dbgs() << " to: " << *New << "\n");
2807 return false;
2808 }
2809
2810 // If we can represent this as a simple value, we have to build the actual
2811 // value to store, which requires expanding the byte present in memset to
2812 // a sensible representation for the alloca type. This is essentially
2813 // splatting the byte to a sufficiently wide integer, splatting it across
2814 // any desired vector width, and bitcasting to the final type.
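    // As an illustrative example: a memset of the byte 0xFF over the covered
    // lanes of a <4 x float> alloca splats 0xFF to the i32 0xFFFFFFFF,
    // converts it to a float element, splats that across the covered lanes,
    // and inserts the result into the previously loaded vector value.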
2815 Value *V;
2816
2817 if (VecTy) {
2818 // If this is a memset of a vectorized alloca, insert it.
2819 assert(ElementTy == ScalarTy);
2820
2821 unsigned BeginIndex = getIndex(NewBeginOffset);
2822 unsigned EndIndex = getIndex(NewEndOffset);
2823 assert(EndIndex > BeginIndex && "Empty vector!");
2824 unsigned NumElements = EndIndex - BeginIndex;
2825 assert(NumElements <= cast<FixedVectorType>(VecTy)->getNumElements() &&
2826 "Too many elements!");
2827
2828 Value *Splat = getIntegerSplat(
2829 II.getValue(), DL.getTypeSizeInBits(ElementTy).getFixedSize() / 8);
2830 Splat = convertValue(DL, IRB, Splat, ElementTy);
2831 if (NumElements > 1)
2832 Splat = getVectorSplat(Splat, NumElements);
2833
2834 Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
2835 NewAI.getAlign(), "oldload");
2836 V = insertVector(IRB, Old, Splat, BeginIndex, "vec");
2837 } else if (IntTy) {
2838 // If this is a memset on an alloca where we can widen stores, insert the
2839 // set integer.
2840 assert(!II.isVolatile());
2841
2842 uint64_t Size = NewEndOffset - NewBeginOffset;
2843 V = getIntegerSplat(II.getValue(), Size);
2844
2845 if (IntTy && (BeginOffset != NewAllocaBeginOffset ||
2846                     EndOffset != NewAllocaEndOffset)) {
2847 Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
2848 NewAI.getAlign(), "oldload");
2849 Old = convertValue(DL, IRB, Old, IntTy);
2850 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
2851 V = insertInteger(DL, IRB, Old, V, Offset, "insert");
2852 } else {
2853 assert(V->getType() == IntTy &&
2854 "Wrong type for an alloca wide integer!");
2855 }
2856 V = convertValue(DL, IRB, V, AllocaTy);
2857 } else {
2858 // Established these invariants above.
2859 assert(NewBeginOffset == NewAllocaBeginOffset);
2860 assert(NewEndOffset == NewAllocaEndOffset);
2861
2862 V = getIntegerSplat(II.getValue(),
2863 DL.getTypeSizeInBits(ScalarTy).getFixedSize() / 8);
2864 if (VectorType *AllocaVecTy = dyn_cast<VectorType>(AllocaTy))
2865 V = getVectorSplat(
2866 V, cast<FixedVectorType>(AllocaVecTy)->getNumElements());
2867
2868 V = convertValue(DL, IRB, V, AllocaTy);
2869 }
2870
2871 StoreInst *New =
2872 IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlign(), II.isVolatile());
2873 New->copyMetadata(II, {LLVMContext::MD_mem_parallel_loop_access,
2874 LLVMContext::MD_access_group});
2875 if (AATags)
2876 New->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset));
2877 LLVM_DEBUG(dbgs() << " to: " << *New << "\n");
2878 return !II.isVolatile();
2879 }
2880
2881   bool visitMemTransferInst(MemTransferInst &II) {
2882 // Rewriting of memory transfer instructions can be a bit tricky. We break
2883 // them into two categories: split intrinsics and unsplit intrinsics.
2884
2885 LLVM_DEBUG(dbgs() << " original: " << II << "\n");
2886
2887 AAMDNodes AATags = II.getAAMetadata();
2888
2889 bool IsDest = &II.getRawDestUse() == OldUse;
2890 assert((IsDest && II.getRawDest() == OldPtr) ||
2891 (!IsDest && II.getRawSource() == OldPtr));
2892
2893 Align SliceAlign = getSliceAlign();
2894
2895 // For unsplit intrinsics, we simply modify the source and destination
2896 // pointers in place. This isn't just an optimization, it is a matter of
2897 // correctness. With unsplit intrinsics we may be dealing with transfers
2898 // within a single alloca before SROA ran, or with transfers that have
2899 // a variable length. We may also be dealing with memmove instead of
2900     // memcpy, and so simply updating the pointers is necessary for us to
2901 // update both source and dest of a single call.
2902 if (!IsSplittable) {
2903 Value *AdjustedPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
2904 if (IsDest) {
2905 II.setDest(AdjustedPtr);
2906 II.setDestAlignment(SliceAlign);
2907       } else {
2909 II.setSource(AdjustedPtr);
2910 II.setSourceAlignment(SliceAlign);
2911 }
2912
2913 LLVM_DEBUG(dbgs() << " to: " << II << "\n");
2914 deleteIfTriviallyDead(OldPtr);
2915 return false;
2916 }
2917 // For split transfer intrinsics we have an incredibly useful assurance:
2918 // the source and destination do not reside within the same alloca, and at
2919 // least one of them does not escape. This means that we can replace
2920 // memmove with memcpy, and we don't need to worry about all manner of
2921 // downsides to splitting and transforming the operations.
2922
2923 // If this doesn't map cleanly onto the alloca type, and that type isn't
2924 // a single value type, just emit a memcpy.
2925 bool EmitMemCpy =
2926 !VecTy && !IntTy &&
2927 (BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset ||
2928 SliceSize !=
2929 DL.getTypeStoreSize(NewAI.getAllocatedType()).getFixedSize() ||
2930 !NewAI.getAllocatedType()->isSingleValueType());
2931
2932 // If we're just going to emit a memcpy, the alloca hasn't changed, and the
2933 // size hasn't been shrunk based on analysis of the viable range, this is
2934 // a no-op.
2935 if (EmitMemCpy && &OldAI == &NewAI) {
2936 // Ensure the start lines up.
2937 assert(NewBeginOffset == BeginOffset);
2938
2939 // Rewrite the size as needed.
2940 if (NewEndOffset != EndOffset)
2941 II.setLength(ConstantInt::get(II.getLength()->getType(),
2942 NewEndOffset - NewBeginOffset));
2943 return false;
2944 }
2945 // Record this instruction for deletion.
2946 Pass.DeadInsts.push_back(&II);
2947
2948 // Strip all inbounds GEPs and pointer casts to try to dig out any root
2949 // alloca that should be re-examined after rewriting this instruction.
2950 Value *OtherPtr = IsDest ? II.getRawSource() : II.getRawDest();
2951 if (AllocaInst *AI =
2952 dyn_cast<AllocaInst>(OtherPtr->stripInBoundsOffsets())) {
2953 assert(AI != &OldAI && AI != &NewAI &&
2954 "Splittable transfers cannot reach the same alloca on both ends.");
2955 Pass.Worklist.insert(AI);
2956 }
2957
2958 Type *OtherPtrTy = OtherPtr->getType();
2959 unsigned OtherAS = OtherPtrTy->getPointerAddressSpace();
2960
2961 // Compute the relative offset for the other pointer within the transfer.
2962 unsigned OffsetWidth = DL.getIndexSizeInBits(OtherAS);
2963 APInt OtherOffset(OffsetWidth, NewBeginOffset - BeginOffset);
2964 Align OtherAlign =
2965 (IsDest ? II.getSourceAlign() : II.getDestAlign()).valueOrOne();
2966 OtherAlign =
2967 commonAlignment(OtherAlign, OtherOffset.zextOrTrunc(64).getZExtValue());
2968
2969 if (EmitMemCpy) {
2970 // Compute the other pointer, folding as much as possible to produce
2971 // a single, simple GEP in most cases.
2972 OtherPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy,
2973 OtherPtr->getName() + ".");
2974
2975 Value *OurPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
2976 Type *SizeTy = II.getLength()->getType();
2977 Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset);
2978
2979 Value *DestPtr, *SrcPtr;
2980 MaybeAlign DestAlign, SrcAlign;
2981 // Note: IsDest is true iff we're copying into the new alloca slice
2982 if (IsDest) {
2983 DestPtr = OurPtr;
2984 DestAlign = SliceAlign;
2985 SrcPtr = OtherPtr;
2986 SrcAlign = OtherAlign;
2987 } else {
2988 DestPtr = OtherPtr;
2989 DestAlign = OtherAlign;
2990 SrcPtr = OurPtr;
2991 SrcAlign = SliceAlign;
2992 }
2993 CallInst *New = IRB.CreateMemCpy(DestPtr, DestAlign, SrcPtr, SrcAlign,
2994 Size, II.isVolatile());
2995 if (AATags)
2996 New->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset));
2997 LLVM_DEBUG(dbgs() << " to: " << *New << "\n");
2998 return false;
2999 }
3000
3001 bool IsWholeAlloca = NewBeginOffset == NewAllocaBeginOffset &&
3002 NewEndOffset == NewAllocaEndOffset;
3003 uint64_t Size = NewEndOffset - NewBeginOffset;
3004 unsigned BeginIndex = VecTy ? getIndex(NewBeginOffset) : 0;
3005 unsigned EndIndex = VecTy ? getIndex(NewEndOffset) : 0;
3006 unsigned NumElements = EndIndex - BeginIndex;
3007 IntegerType *SubIntTy =
3008 IntTy ? Type::getIntNTy(IntTy->getContext(), Size * 8) : nullptr;
3009
3010 // Reset the other pointer type to match the register type we're going to
3011 // use, but using the address space of the original other pointer.
3012 Type *OtherTy;
3013 if (VecTy && !IsWholeAlloca) {
3014 if (NumElements == 1)
3015 OtherTy = VecTy->getElementType();
3016 else
3017 OtherTy = FixedVectorType::get(VecTy->getElementType(), NumElements);
3018 } else if (IntTy && !IsWholeAlloca) {
3019 OtherTy = SubIntTy;
3020 } else {
3021 OtherTy = NewAllocaTy;
3022 }
3023 OtherPtrTy = OtherTy->getPointerTo(OtherAS);
3024
3025 Value *SrcPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy,
3026 OtherPtr->getName() + ".");
3027 MaybeAlign SrcAlign = OtherAlign;
3028 Value *DstPtr = &NewAI;
3029 MaybeAlign DstAlign = SliceAlign;
3030 if (!IsDest) {
3031 std::swap(SrcPtr, DstPtr);
3032 std::swap(SrcAlign, DstAlign);
3033 }
3034
3035 Value *Src;
3036 if (VecTy && !IsWholeAlloca && !IsDest) {
3037 Src = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
3038 NewAI.getAlign(), "load");
3039 Src = extractVector(IRB, Src, BeginIndex, EndIndex, "vec");
3040 } else if (IntTy && !IsWholeAlloca && !IsDest) {
3041 Src = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
3042 NewAI.getAlign(), "load");
3043 Src = convertValue(DL, IRB, Src, IntTy);
3044 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
3045 Src = extractInteger(DL, IRB, Src, SubIntTy, Offset, "extract");
3046 } else {
3047 LoadInst *Load = IRB.CreateAlignedLoad(OtherTy, SrcPtr, SrcAlign,
3048 II.isVolatile(), "copyload");
3049 Load->copyMetadata(II, {LLVMContext::MD_mem_parallel_loop_access,
3050 LLVMContext::MD_access_group});
3051 if (AATags)
3052 Load->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset));
3053 Src = Load;
3054 }
3055
3056 if (VecTy && !IsWholeAlloca && IsDest) {
3057 Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
3058 NewAI.getAlign(), "oldload");
3059 Src = insertVector(IRB, Old, Src, BeginIndex, "vec");
3060 } else if (IntTy && !IsWholeAlloca && IsDest) {
3061 Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
3062 NewAI.getAlign(), "oldload");
3063 Old = convertValue(DL, IRB, Old, IntTy);
3064 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
3065 Src = insertInteger(DL, IRB, Old, Src, Offset, "insert");
3066 Src = convertValue(DL, IRB, Src, NewAllocaTy);
3067 }
3068
3069 StoreInst *Store = cast<StoreInst>(
3070 IRB.CreateAlignedStore(Src, DstPtr, DstAlign, II.isVolatile()));
3071 Store->copyMetadata(II, {LLVMContext::MD_mem_parallel_loop_access,
3072 LLVMContext::MD_access_group});
3073 if (AATags)
3074 Store->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset));
3075 LLVM_DEBUG(dbgs() << " to: " << *Store << "\n");
3076 return !II.isVolatile();
3077 }
3078
3079   bool visitIntrinsicInst(IntrinsicInst &II) {
3080 assert((II.isLifetimeStartOrEnd() || II.isDroppable()) &&
3081 "Unexpected intrinsic!");
3082 LLVM_DEBUG(dbgs() << " original: " << II << "\n");
3083
3084 // Record this instruction for deletion.
3085 Pass.DeadInsts.push_back(&II);
3086
3087 if (II.isDroppable()) {
3088 assert(II.getIntrinsicID() == Intrinsic::assume && "Expected assume");
3089 // TODO For now we forget assumed information, this can be improved.
3090 OldPtr->dropDroppableUsesIn(II);
3091 return true;
3092 }
3093
3094 assert(II.getArgOperand(1) == OldPtr);
3095 // Lifetime intrinsics are only promotable if they cover the whole alloca.
3096 // Therefore, we drop lifetime intrinsics which don't cover the whole
3097 // alloca.
3098 // (In theory, intrinsics which partially cover an alloca could be
3099 // promoted, but PromoteMemToReg doesn't handle that case.)
3100 // FIXME: Check whether the alloca is promotable before dropping the
3101 // lifetime intrinsics?
3102 if (NewBeginOffset != NewAllocaBeginOffset ||
3103 NewEndOffset != NewAllocaEndOffset)
3104 return true;
3105
3106 ConstantInt *Size =
3107 ConstantInt::get(cast<IntegerType>(II.getArgOperand(0)->getType()),
3108 NewEndOffset - NewBeginOffset);
3109 // Lifetime intrinsics always expect an i8* so directly get such a pointer
3110 // for the new alloca slice.
3111     Type *PointerTy =
        IRB.getInt8PtrTy(OldPtr->getType()->getPointerAddressSpace());
3112 Value *Ptr = getNewAllocaSlicePtr(IRB, PointerTy);
3113 Value *New;
3114 if (II.getIntrinsicID() == Intrinsic::lifetime_start)
3115 New = IRB.CreateLifetimeStart(Ptr, Size);
3116 else
3117 New = IRB.CreateLifetimeEnd(Ptr, Size);
3118
3119 (void)New;
3120 LLVM_DEBUG(dbgs() << " to: " << *New << "\n");
3121
3122 return true;
3123 }
3124
3125   void fixLoadStoreAlign(Instruction &Root) {
3126 // This algorithm implements the same visitor loop as
3127 // hasUnsafePHIOrSelectUse, and fixes the alignment of each load
3128 // or store found.
3129 SmallPtrSet<Instruction *, 4> Visited;
3130 SmallVector<Instruction *, 4> Uses;
3131 Visited.insert(&Root);
3132 Uses.push_back(&Root);
3133 do {
3134 Instruction *I = Uses.pop_back_val();
3135
3136 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
3137 LI->setAlignment(std::min(LI->getAlign(), getSliceAlign()));
3138 continue;
3139 }
3140 if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
3141 SI->setAlignment(std::min(SI->getAlign(), getSliceAlign()));
3142 continue;
3143 }
3144
3145 assert(isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I) ||
3146 isa<PHINode>(I) || isa<SelectInst>(I) ||
3147 isa<GetElementPtrInst>(I));
3148 for (User *U : I->users())
3149 if (Visited.insert(cast<Instruction>(U)).second)
3150 Uses.push_back(cast<Instruction>(U));
3151 } while (!Uses.empty());
3152 }
3153
3154   bool visitPHINode(PHINode &PN) {
3155 LLVM_DEBUG(dbgs() << " original: " << PN << "\n");
3156 assert(BeginOffset >= NewAllocaBeginOffset && "PHIs are unsplittable");
3157 assert(EndOffset <= NewAllocaEndOffset && "PHIs are unsplittable");
3158
3159 // We would like to compute a new pointer in only one place, but have it be
3160 // as local as possible to the PHI. To do that, we re-use the location of
3161 // the old pointer, which necessarily must be in the right position to
3162 // dominate the PHI.
3163 IRBuilderBase::InsertPointGuard Guard(IRB);
3164 if (isa<PHINode>(OldPtr))
3165 IRB.SetInsertPoint(&*OldPtr->getParent()->getFirstInsertionPt());
3166 else
3167 IRB.SetInsertPoint(OldPtr);
3168 IRB.SetCurrentDebugLocation(OldPtr->getDebugLoc());
3169
3170 Value *NewPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
3171 // Replace the operands which were using the old pointer.
3172 std::replace(PN.op_begin(), PN.op_end(), cast<Value>(OldPtr), NewPtr);
3173
3174 LLVM_DEBUG(dbgs() << " to: " << PN << "\n");
3175 deleteIfTriviallyDead(OldPtr);
3176
3177 // Fix the alignment of any loads or stores using this PHI node.
3178 fixLoadStoreAlign(PN);
3179
3180 // PHIs can't be promoted on their own, but often can be speculated. We
3181 // check the speculation outside of the rewriter so that we see the
3182 // fully-rewritten alloca.
3183 PHIUsers.insert(&PN);
3184 return true;
3185 }
3186
3187   bool visitSelectInst(SelectInst &SI) {
3188 LLVM_DEBUG(dbgs() << " original: " << SI << "\n");
3189 assert((SI.getTrueValue() == OldPtr || SI.getFalseValue() == OldPtr) &&
3190 "Pointer isn't an operand!");
3191 assert(BeginOffset >= NewAllocaBeginOffset && "Selects are unsplittable");
3192 assert(EndOffset <= NewAllocaEndOffset && "Selects are unsplittable");
3193
3194 Value *NewPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
3195 // Replace the operands which were using the old pointer.
3196 if (SI.getOperand(1) == OldPtr)
3197 SI.setOperand(1, NewPtr);
3198 if (SI.getOperand(2) == OldPtr)
3199 SI.setOperand(2, NewPtr);
3200
3201 LLVM_DEBUG(dbgs() << " to: " << SI << "\n");
3202 deleteIfTriviallyDead(OldPtr);
3203
3204 // Fix the alignment of any loads or stores using this select.
3205 fixLoadStoreAlign(SI);
3206
3207 // Selects can't be promoted on their own, but often can be speculated. We
3208 // check the speculation outside of the rewriter so that we see the
3209 // fully-rewritten alloca.
3210 SelectUsers.insert(&SI);
3211 return true;
3212 }
3213 };
3214
3215 namespace {
3216
3217 /// Visitor to rewrite aggregate loads and stores as scalar.
3218 ///
3219 /// This pass aggressively rewrites all aggregate loads and stores on
3220 /// a particular pointer (or any pointer derived from it which we can identify)
3221 /// with scalar loads and stores.
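/// As an illustrative example, a load of a first-class aggregate such as
/// { i32, float } is replaced by a GEP + scalar load for each leaf member,
/// recombined with insertvalue; stores are handled symmetrically with
/// extractvalue + scalar stores.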
3222 class AggLoadStoreRewriter : public InstVisitor<AggLoadStoreRewriter, bool> {
3223 // Befriend the base class so it can delegate to private visit methods.
3224 friend class InstVisitor<AggLoadStoreRewriter, bool>;
3225
3226 /// Queue of pointer uses to analyze and potentially rewrite.
3227 SmallVector<Use *, 8> Queue;
3228
3229 /// Set to prevent us from cycling with phi nodes and loops.
3230 SmallPtrSet<User *, 8> Visited;
3231
3232 /// The current pointer use being rewritten. This is used to dig up the used
3233 /// value (as opposed to the user).
3234 Use *U = nullptr;
3235
3236 /// Used to calculate offsets, and hence alignment, of subobjects.
3237 const DataLayout &DL;
3238
3239 IRBuilderTy &IRB;
3240
3241 public:
3242   AggLoadStoreRewriter(const DataLayout &DL, IRBuilderTy &IRB)
3243 : DL(DL), IRB(IRB) {}
3244
3245 /// Rewrite loads and stores through a pointer and all pointers derived from
3246 /// it.
3247   bool rewrite(Instruction &I) {
3248 LLVM_DEBUG(dbgs() << " Rewriting FCA loads and stores...\n");
3249 enqueueUsers(I);
3250 bool Changed = false;
3251 while (!Queue.empty()) {
3252 U = Queue.pop_back_val();
3253 Changed |= visit(cast<Instruction>(U->getUser()));
3254 }
3255 return Changed;
3256 }
3257
3258 private:
3259 /// Enqueue all the users of the given instruction for further processing.
3260 /// This uses a set to de-duplicate users.
3261   void enqueueUsers(Instruction &I) {
3262 for (Use &U : I.uses())
3263 if (Visited.insert(U.getUser()).second)
3264 Queue.push_back(&U);
3265 }
3266
3267 // Conservative default is to not rewrite anything.
3268   bool visitInstruction(Instruction &I) { return false; }
3269
3270 /// Generic recursive split emission class.
3271 template <typename Derived> class OpSplitter {
3272 protected:
3273 /// The builder used to form new instructions.
3274 IRBuilderTy &IRB;
3275
3276     /// The indices which will be used with insert- or extractvalue to select the
3277 /// appropriate value within the aggregate.
3278 SmallVector<unsigned, 4> Indices;
3279
3280 /// The indices to a GEP instruction which will move Ptr to the correct slot
3281 /// within the aggregate.
3282 SmallVector<Value *, 4> GEPIndices;
3283
3284 /// The base pointer of the original op, used as a base for GEPing the
3285 /// split operations.
3286 Value *Ptr;
3287
3288 /// The base pointee type being GEPed into.
3289 Type *BaseTy;
3290
3291 /// Known alignment of the base pointer.
3292 Align BaseAlign;
3293
3294 /// To calculate offset of each component so we can correctly deduce
3295 /// alignments.
3296 const DataLayout &DL;
3297
3298 /// Initialize the splitter with an insertion point, Ptr and start with a
3299 /// single zero GEP index.
3300     OpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy,
3301 Align BaseAlign, const DataLayout &DL, IRBuilderTy &IRB)
3302 : IRB(IRB), GEPIndices(1, IRB.getInt32(0)), Ptr(Ptr), BaseTy(BaseTy),
3303 BaseAlign(BaseAlign), DL(DL) {
3304 IRB.SetInsertPoint(InsertionPoint);
3305 }
3306
3307 public:
3308 /// Generic recursive split emission routine.
3309 ///
3310 /// This method recursively splits an aggregate op (load or store) into
3311 /// scalar or vector ops. It splits recursively until it hits a single value
3312 /// and emits that single value operation via the template argument.
3313 ///
3314 /// The logic of this routine relies on GEPs and insertvalue and
3315 /// extractvalue all operating with the same fundamental index list, merely
3316 /// formatted differently (GEPs need actual values).
3317 ///
3318 /// \param Ty The type being split recursively into smaller ops.
3319 /// \param Agg The aggregate value being built up or stored, depending on
3320 /// whether this is splitting a load or a store respectively.
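    ///
    /// As an illustrative example, splitting an op on { i32, [2 x float] }
    /// emits leaf operations for the index lists {0}, {1, 0} and {1, 1}.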
3321     void emitSplitOps(Type *Ty, Value *&Agg, const Twine &Name) {
3322 if (Ty->isSingleValueType()) {
3323 unsigned Offset = DL.getIndexedOffsetInType(BaseTy, GEPIndices);
3324 return static_cast<Derived *>(this)->emitFunc(
3325 Ty, Agg, commonAlignment(BaseAlign, Offset), Name);
3326 }
3327
3328 if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
3329 unsigned OldSize = Indices.size();
3330 (void)OldSize;
3331 for (unsigned Idx = 0, Size = ATy->getNumElements(); Idx != Size;
3332 ++Idx) {
3333 assert(Indices.size() == OldSize && "Did not return to the old size");
3334 Indices.push_back(Idx);
3335 GEPIndices.push_back(IRB.getInt32(Idx));
3336 emitSplitOps(ATy->getElementType(), Agg, Name + "." + Twine(Idx));
3337 GEPIndices.pop_back();
3338 Indices.pop_back();
3339 }
3340 return;
3341 }
3342
3343 if (StructType *STy = dyn_cast<StructType>(Ty)) {
3344 unsigned OldSize = Indices.size();
3345 (void)OldSize;
3346 for (unsigned Idx = 0, Size = STy->getNumElements(); Idx != Size;
3347 ++Idx) {
3348 assert(Indices.size() == OldSize && "Did not return to the old size");
3349 Indices.push_back(Idx);
3350 GEPIndices.push_back(IRB.getInt32(Idx));
3351 emitSplitOps(STy->getElementType(Idx), Agg, Name + "." + Twine(Idx));
3352 GEPIndices.pop_back();
3353 Indices.pop_back();
3354 }
3355 return;
3356 }
3357
3358 llvm_unreachable("Only arrays and structs are aggregate loadable types");
3359 }
3360 };
3361
3362 struct LoadOpSplitter : public OpSplitter<LoadOpSplitter> {
3363 AAMDNodes AATags;
3364
3365     LoadOpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy,
3366 AAMDNodes AATags, Align BaseAlign, const DataLayout &DL,
3367 IRBuilderTy &IRB)
3368 : OpSplitter<LoadOpSplitter>(InsertionPoint, Ptr, BaseTy, BaseAlign, DL,
3369 IRB),
3370 AATags(AATags) {}
3371
3372 /// Emit a leaf load of a single value. This is called at the leaves of the
3373 /// recursive emission to actually load values.
3374     void emitFunc(Type *Ty, Value *&Agg, Align Alignment, const Twine &Name) {
3375 assert(Ty->isSingleValueType());
3376 // Load the single value and insert it using the indices.
3377 Value *GEP =
3378 IRB.CreateInBoundsGEP(BaseTy, Ptr, GEPIndices, Name + ".gep");
3379 LoadInst *Load =
3380 IRB.CreateAlignedLoad(Ty, GEP, Alignment, Name + ".load");
3381
3382 APInt Offset(
3383 DL.getIndexSizeInBits(Ptr->getType()->getPointerAddressSpace()), 0);
3384 if (AATags &&
3385 GEPOperator::accumulateConstantOffset(BaseTy, GEPIndices, DL, Offset))
3386 Load->setAAMetadata(AATags.shift(Offset.getZExtValue()));
3387
3388 Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert");
3389 LLVM_DEBUG(dbgs() << " to: " << *Load << "\n");
3390 }
3391 };
3392
3393   bool visitLoadInst(LoadInst &LI) {
3394 assert(LI.getPointerOperand() == *U);
3395 if (!LI.isSimple() || LI.getType()->isSingleValueType())
3396 return false;
3397
3398 // We have an aggregate being loaded, split it apart.
3399 LLVM_DEBUG(dbgs() << " original: " << LI << "\n");
3400 LoadOpSplitter Splitter(&LI, *U, LI.getType(), LI.getAAMetadata(),
3401 getAdjustedAlignment(&LI, 0), DL, IRB);
3402 Value *V = PoisonValue::get(LI.getType());
3403 Splitter.emitSplitOps(LI.getType(), V, LI.getName() + ".fca");
3404 Visited.erase(&LI);
3405 LI.replaceAllUsesWith(V);
3406 LI.eraseFromParent();
3407 return true;
3408 }
3409
3410 struct StoreOpSplitter : public OpSplitter<StoreOpSplitter> {
3411     StoreOpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy,
3412 AAMDNodes AATags, Align BaseAlign, const DataLayout &DL,
3413 IRBuilderTy &IRB)
3414 : OpSplitter<StoreOpSplitter>(InsertionPoint, Ptr, BaseTy, BaseAlign,
3415 DL, IRB),
3416 AATags(AATags) {}
3417 AAMDNodes AATags;
3418 /// Emit a leaf store of a single value. This is called at the leaves of the
3419 /// recursive emission to actually produce stores.
3420     void emitFunc(Type *Ty, Value *&Agg, Align Alignment, const Twine &Name) {
3421 assert(Ty->isSingleValueType());
3422 // Extract the single value and store it using the indices.
3423 //
3424 // The gep and extractvalue values are factored out of the CreateStore
3425 // call to make the output independent of the argument evaluation order.
3426 Value *ExtractValue =
3427 IRB.CreateExtractValue(Agg, Indices, Name + ".extract");
3428 Value *InBoundsGEP =
3429 IRB.CreateInBoundsGEP(BaseTy, Ptr, GEPIndices, Name + ".gep");
3430 StoreInst *Store =
3431 IRB.CreateAlignedStore(ExtractValue, InBoundsGEP, Alignment);
3432
3433 APInt Offset(
3434 DL.getIndexSizeInBits(Ptr->getType()->getPointerAddressSpace()), 0);
3435 if (AATags &&
3436 GEPOperator::accumulateConstantOffset(BaseTy, GEPIndices, DL, Offset))
3437 Store->setAAMetadata(AATags.shift(Offset.getZExtValue()));
3438
3439 LLVM_DEBUG(dbgs() << " to: " << *Store << "\n");
3440 }
3441 };
3442
3443   bool visitStoreInst(StoreInst &SI) {
3444 if (!SI.isSimple() || SI.getPointerOperand() != *U)
3445 return false;
3446 Value *V = SI.getValueOperand();
3447 if (V->getType()->isSingleValueType())
3448 return false;
3449
3450 // We have an aggregate being stored, split it apart.
3451 LLVM_DEBUG(dbgs() << " original: " << SI << "\n");
3452 StoreOpSplitter Splitter(&SI, *U, V->getType(), SI.getAAMetadata(),
3453 getAdjustedAlignment(&SI, 0), DL, IRB);
3454 Splitter.emitSplitOps(V->getType(), V, V->getName() + ".fca");
3455 Visited.erase(&SI);
3456 SI.eraseFromParent();
3457 return true;
3458 }
3459
3460   bool visitBitCastInst(BitCastInst &BC) {
3461 enqueueUsers(BC);
3462 return false;
3463 }
3464
3465   bool visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
3466 enqueueUsers(ASC);
3467 return false;
3468 }
3469
3470 // Fold gep (select cond, ptr1, ptr2) => select cond, gep(ptr1), gep(ptr2)
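  //
  // As an illustrative sketch (with hypothetical value names):
  //   %sel = select i1 %c, [4 x i32]* %a, [4 x i32]* %b
  //   %gep = getelementptr inbounds [4 x i32], [4 x i32]* %sel, i64 0, i64 2
  // becomes
  //   %a.sroa.gep = getelementptr inbounds [4 x i32], [4 x i32]* %a, i64 0, i64 2
  //   %b.sroa.gep = getelementptr inbounds [4 x i32], [4 x i32]* %b, i64 0, i64 2
  //   %sel.sroa.sel = select i1 %c, i32* %a.sroa.gep, i32* %b.sroa.gep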
3471   bool foldGEPSelect(GetElementPtrInst &GEPI) {
3472 if (!GEPI.hasAllConstantIndices())
3473 return false;
3474
3475 SelectInst *Sel = cast<SelectInst>(GEPI.getPointerOperand());
3476
3477 LLVM_DEBUG(dbgs() << " Rewriting gep(select) -> select(gep):"
3478 << "\n original: " << *Sel
3479 << "\n " << GEPI);
3480
3481 IRB.SetInsertPoint(&GEPI);
3482 SmallVector<Value *, 4> Index(GEPI.indices());
3483 bool IsInBounds = GEPI.isInBounds();
3484
3485 Type *Ty = GEPI.getSourceElementType();
3486 Value *True = Sel->getTrueValue();
3487 Value *NTrue = IRB.CreateGEP(Ty, True, Index, True->getName() + ".sroa.gep",
3488 IsInBounds);
3489
3490 Value *False = Sel->getFalseValue();
3491
3492 Value *NFalse = IRB.CreateGEP(Ty, False, Index,
3493 False->getName() + ".sroa.gep", IsInBounds);
3494
3495 Value *NSel = IRB.CreateSelect(Sel->getCondition(), NTrue, NFalse,
3496 Sel->getName() + ".sroa.sel");
3497 Visited.erase(&GEPI);
3498 GEPI.replaceAllUsesWith(NSel);
3499 GEPI.eraseFromParent();
3500 Instruction *NSelI = cast<Instruction>(NSel);
3501 Visited.insert(NSelI);
3502 enqueueUsers(*NSelI);
3503
3504 LLVM_DEBUG(dbgs() << "\n to: " << *NTrue
3505 << "\n " << *NFalse
3506 << "\n " << *NSel << '\n');
3507
3508 return true;
3509 }
3510
3511 // Fold gep (phi ptr1, ptr2) => phi gep(ptr1), gep(ptr2)
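  //
  // Illustrative sketch (hypothetical names): the GEP of the PHI is replaced by
  //   %phi.sroa.phi = phi i32* [ %p0.sroa.gep, %bb0 ], [ %p1.sroa.gep, %bb1 ]
  // where each %pN.sroa.gep is a copy of the GEP emitted right after the
  // definition of the corresponding incoming pointer %pN.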
3512   bool foldGEPPhi(GetElementPtrInst &GEPI) {
3513 if (!GEPI.hasAllConstantIndices())
3514 return false;
3515
3516 PHINode *PHI = cast<PHINode>(GEPI.getPointerOperand());
3517 if (GEPI.getParent() != PHI->getParent() ||
3518 llvm::any_of(PHI->incoming_values(), [](Value *In)
3519 { Instruction *I = dyn_cast<Instruction>(In);
3520 return !I || isa<GetElementPtrInst>(I) || isa<PHINode>(I) ||
3521 succ_empty(I->getParent()) ||
3522 !I->getParent()->isLegalToHoistInto();
3523 }))
3524 return false;
3525
3526 LLVM_DEBUG(dbgs() << " Rewriting gep(phi) -> phi(gep):"
3527 << "\n original: " << *PHI
3528 << "\n " << GEPI
3529 << "\n to: ");
3530
3531 SmallVector<Value *, 4> Index(GEPI.indices());
3532 bool IsInBounds = GEPI.isInBounds();
3533 IRB.SetInsertPoint(GEPI.getParent()->getFirstNonPHI());
3534 PHINode *NewPN = IRB.CreatePHI(GEPI.getType(), PHI->getNumIncomingValues(),
3535 PHI->getName() + ".sroa.phi");
3536 for (unsigned I = 0, E = PHI->getNumIncomingValues(); I != E; ++I) {
3537 BasicBlock *B = PHI->getIncomingBlock(I);
3538 Value *NewVal = nullptr;
3539 int Idx = NewPN->getBasicBlockIndex(B);
3540 if (Idx >= 0) {
3541 NewVal = NewPN->getIncomingValue(Idx);
3542 } else {
3543 Instruction *In = cast<Instruction>(PHI->getIncomingValue(I));
3544
3545 IRB.SetInsertPoint(In->getParent(), std::next(In->getIterator()));
3546 Type *Ty = GEPI.getSourceElementType();
3547 NewVal = IRB.CreateGEP(Ty, In, Index, In->getName() + ".sroa.gep",
3548 IsInBounds);
3549 }
3550 NewPN->addIncoming(NewVal, B);
3551 }
3552
3553 Visited.erase(&GEPI);
3554 GEPI.replaceAllUsesWith(NewPN);
3555 GEPI.eraseFromParent();
3556 Visited.insert(NewPN);
3557 enqueueUsers(*NewPN);
3558
3559 LLVM_DEBUG(for (Value *In : NewPN->incoming_values())
3560 dbgs() << "\n " << *In;
3561 dbgs() << "\n " << *NewPN << '\n');
3562
3563 return true;
3564 }
3565
3566   bool visitGetElementPtrInst(GetElementPtrInst &GEPI) {
3567 if (isa<SelectInst>(GEPI.getPointerOperand()) &&
3568 foldGEPSelect(GEPI))
3569 return true;
3570
3571 if (isa<PHINode>(GEPI.getPointerOperand()) &&
3572 foldGEPPhi(GEPI))
3573 return true;
3574
3575 enqueueUsers(GEPI);
3576 return false;
3577 }
3578
3579   bool visitPHINode(PHINode &PN) {
3580 enqueueUsers(PN);
3581 return false;
3582 }
3583
3584   bool visitSelectInst(SelectInst &SI) {
3585 enqueueUsers(SI);
3586 return false;
3587 }
3588 };
3589
3590 } // end anonymous namespace
3591
3592 /// Strip aggregate type wrapping.
3593 ///
3594 /// This removes no-op aggregate types wrapping an underlying type. It will
3595 /// strip as many layers of types as it can without changing either the type
3596 /// size or the allocated size.
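///
/// For example, { { float } } is stripped down to float because each layer has
/// the same size as the wrapped type, while { i32, i32 } is returned unchanged.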
3597 static Type *stripAggregateTypeWrapping(const DataLayout &DL, Type *Ty) {
3598 if (Ty->isSingleValueType())
3599 return Ty;
3600
3601 uint64_t AllocSize = DL.getTypeAllocSize(Ty).getFixedSize();
3602 uint64_t TypeSize = DL.getTypeSizeInBits(Ty).getFixedSize();
3603
3604 Type *InnerTy;
3605 if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) {
3606 InnerTy = ArrTy->getElementType();
3607 } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
3608 const StructLayout *SL = DL.getStructLayout(STy);
3609 unsigned Index = SL->getElementContainingOffset(0);
3610 InnerTy = STy->getElementType(Index);
3611 } else {
3612 return Ty;
3613 }
3614
3615 if (AllocSize > DL.getTypeAllocSize(InnerTy).getFixedSize() ||
3616 TypeSize > DL.getTypeSizeInBits(InnerTy).getFixedSize())
3617 return Ty;
3618
3619 return stripAggregateTypeWrapping(DL, InnerTy);
3620 }
3621
3622 /// Try to find a partition of the aggregate type passed in for a given
3623 /// offset and size.
3624 ///
3625 /// This recurses through the aggregate type and tries to compute a subtype
3626 /// based on the offset and size. When the offset and size span a sub-section
3627 /// of an array, it will even compute a new array type for that sub-section,
3628 /// and the same for structs.
3629 ///
3630 /// Note that this routine is very strict and tries to find a partition of the
3631 /// type which produces the *exact* right offset and size. It is not forgiving
3632 /// when the size or offset cause either end of type-based partition to be off.
3633 /// Also, this is a best-effort routine. It is reasonable to give up and not
3634 /// return a type if necessary.
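///
/// For example, given { i32, { float, float }, i64 } with the usual natural
/// layout, an (Offset, Size) of (4, 8) yields the inner { float, float },
/// (4, 4) recurses down to float, and (2, 4) straddles elements and fails,
/// returning nullptr.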
3635 static Type *getTypePartition(const DataLayout &DL, Type *Ty, uint64_t Offset,
3636 uint64_t Size) {
3637 if (Offset == 0 && DL.getTypeAllocSize(Ty).getFixedSize() == Size)
3638 return stripAggregateTypeWrapping(DL, Ty);
3639 if (Offset > DL.getTypeAllocSize(Ty).getFixedSize() ||
3640 (DL.getTypeAllocSize(Ty).getFixedSize() - Offset) < Size)
3641 return nullptr;
3642
3643 if (isa<ArrayType>(Ty) || isa<VectorType>(Ty)) {
3644 Type *ElementTy;
3645 uint64_t TyNumElements;
3646 if (auto *AT = dyn_cast<ArrayType>(Ty)) {
3647 ElementTy = AT->getElementType();
3648 TyNumElements = AT->getNumElements();
3649 } else {
3650 // FIXME: This isn't right for vectors with non-byte-sized or
3651 // non-power-of-two sized elements.
3652 auto *VT = cast<FixedVectorType>(Ty);
3653 ElementTy = VT->getElementType();
3654 TyNumElements = VT->getNumElements();
3655 }
3656 uint64_t ElementSize = DL.getTypeAllocSize(ElementTy).getFixedSize();
3657 uint64_t NumSkippedElements = Offset / ElementSize;
3658 if (NumSkippedElements >= TyNumElements)
3659 return nullptr;
3660 Offset -= NumSkippedElements * ElementSize;
3661
3662 // First check if we need to recurse.
3663 if (Offset > 0 || Size < ElementSize) {
3664 // Bail if the partition ends in a different array element.
3665 if ((Offset + Size) > ElementSize)
3666 return nullptr;
3667 // Recurse through the element type trying to peel off offset bytes.
3668 return getTypePartition(DL, ElementTy, Offset, Size);
3669 }
3670 assert(Offset == 0);
3671
3672 if (Size == ElementSize)
3673 return stripAggregateTypeWrapping(DL, ElementTy);
3674 assert(Size > ElementSize);
3675 uint64_t NumElements = Size / ElementSize;
3676 if (NumElements * ElementSize != Size)
3677 return nullptr;
3678 return ArrayType::get(ElementTy, NumElements);
3679 }
3680
3681 StructType *STy = dyn_cast<StructType>(Ty);
3682 if (!STy)
3683 return nullptr;
3684
3685 const StructLayout *SL = DL.getStructLayout(STy);
3686 if (Offset >= SL->getSizeInBytes())
3687 return nullptr;
3688 uint64_t EndOffset = Offset + Size;
3689 if (EndOffset > SL->getSizeInBytes())
3690 return nullptr;
3691
3692 unsigned Index = SL->getElementContainingOffset(Offset);
3693 Offset -= SL->getElementOffset(Index);
3694
3695 Type *ElementTy = STy->getElementType(Index);
3696 uint64_t ElementSize = DL.getTypeAllocSize(ElementTy).getFixedSize();
3697 if (Offset >= ElementSize)
3698 return nullptr; // The offset points into alignment padding.
3699
3700 // See if any partition must be contained by the element.
3701 if (Offset > 0 || Size < ElementSize) {
3702 if ((Offset + Size) > ElementSize)
3703 return nullptr;
3704 return getTypePartition(DL, ElementTy, Offset, Size);
3705 }
3706 assert(Offset == 0);
3707
3708 if (Size == ElementSize)
3709 return stripAggregateTypeWrapping(DL, ElementTy);
3710
3711 StructType::element_iterator EI = STy->element_begin() + Index,
3712 EE = STy->element_end();
3713 if (EndOffset < SL->getSizeInBytes()) {
3714 unsigned EndIndex = SL->getElementContainingOffset(EndOffset);
3715 if (Index == EndIndex)
3716 return nullptr; // Within a single element and its padding.
3717
3718 // Don't try to form "natural" types if the elements don't line up with the
3719 // expected size.
3720 // FIXME: We could potentially recurse down through the last element in the
3721 // sub-struct to find a natural end point.
3722 if (SL->getElementOffset(EndIndex) != EndOffset)
3723 return nullptr;
3724
3725 assert(Index < EndIndex);
3726 EE = STy->element_begin() + EndIndex;
3727 }
3728
3729 // Try to build up a sub-structure.
3730 StructType *SubTy =
3731 StructType::get(STy->getContext(), makeArrayRef(EI, EE), STy->isPacked());
3732 const StructLayout *SubSL = DL.getStructLayout(SubTy);
3733 if (Size != SubSL->getSizeInBytes())
3734 return nullptr; // The sub-struct doesn't have quite the size needed.
3735
3736 return SubTy;
3737 }
3738
3739 /// Pre-split loads and stores to simplify rewriting.
3740 ///
3741 /// We want to break up the splittable load+store pairs as much as
3742 /// possible. This is important to do as a preprocessing step, as once we
3743 /// start rewriting the accesses to partitions of the alloca we lose the
3744 /// necessary information to correctly split apart paired loads and stores
3745 /// which both point into this alloca. The case to consider is something like
3746 /// the following:
3747 ///
3748 /// %a = alloca [12 x i8]
3749 /// %gep1 = getelementptr [12 x i8]* %a, i32 0, i32 0
3750 /// %gep2 = getelementptr [12 x i8]* %a, i32 0, i32 4
3751 /// %gep3 = getelementptr [12 x i8]* %a, i32 0, i32 8
3752 /// %iptr1 = bitcast i8* %gep1 to i64*
3753 /// %iptr2 = bitcast i8* %gep2 to i64*
3754 /// %fptr1 = bitcast i8* %gep1 to float*
3755 /// %fptr2 = bitcast i8* %gep2 to float*
3756 /// %fptr3 = bitcast i8* %gep3 to float*
3757 /// store float 0.0, float* %fptr1
3758 /// store float 1.0, float* %fptr2
3759 /// %v = load i64* %iptr1
3760 /// store i64 %v, i64* %iptr2
3761 /// %f1 = load float* %fptr2
3762 /// %f2 = load float* %fptr3
3763 ///
3764 /// Here we want to form 3 partitions of the alloca, each 4 bytes large, and
3765 /// promote everything so we recover the 2 SSA values that should have been
3766 /// there all along.
3767 ///
3768 /// \returns true if any changes are made.
3769 bool SROAPass::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
3770 LLVM_DEBUG(dbgs() << "Pre-splitting loads and stores\n");
3771
3772 // Track the loads and stores which are candidates for pre-splitting here, in
3773 // the order they first appear during the partition scan. These give stable
3774 // iteration order and a basis for tracking which loads and stores we
3775 // actually split.
3776 SmallVector<LoadInst *, 4> Loads;
3777 SmallVector<StoreInst *, 4> Stores;
3778
3779 // We need to accumulate the splits required of each load or store where we
3780 // can find them via a direct lookup. This is important to cross-check loads
3781 // and stores against each other. We also track the slice so that we can kill
3782 // all the slices that end up split.
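  // As an illustrative example, a load slice covering bytes [0, 12) of the
  // alloca that crosses partition boundaries at offsets 4 and 8 ends up with
  // Splits == {4, 8}; the slice's own end implies the final piece.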
3783 struct SplitOffsets {
3784 Slice *S;
3785 std::vector<uint64_t> Splits;
3786 };
3787 SmallDenseMap<Instruction *, SplitOffsets, 8> SplitOffsetsMap;
3788
3789 // Track loads out of this alloca which cannot, for any reason, be pre-split.
3790 // This is important as we also cannot pre-split stores of those loads!
3791 // FIXME: This is all pretty gross. It means that we can be more aggressive
3792 // in pre-splitting when the load feeding the store happens to come from
3793 // a separate alloca. Put another way, the effectiveness of SROA would be
3794 // decreased by a frontend which just concatenated all of its local allocas
3795 // into one big flat alloca. But defeating such patterns is exactly the job
3796 // SROA is tasked with! Sadly, to not have this discrepancy we would have
3797 // change store pre-splitting to actually force pre-splitting of the load
3798 // that feeds it *and all stores*. That makes pre-splitting much harder, but
3799 // maybe it would make it more principled?
3800 SmallPtrSet<LoadInst *, 8> UnsplittableLoads;
3801
3802 LLVM_DEBUG(dbgs() << " Searching for candidate loads and stores\n");
3803 for (auto &P : AS.partitions()) {
3804 for (Slice &S : P) {
3805 Instruction *I = cast<Instruction>(S.getUse()->getUser());
3806 if (!S.isSplittable() || S.endOffset() <= P.endOffset()) {
3807 // If this is a load we have to track that it can't participate in any
3808 // pre-splitting. If this is a store of a load we have to track that
3809 // that load also can't participate in any pre-splitting.
3810 if (auto *LI = dyn_cast<LoadInst>(I))
3811 UnsplittableLoads.insert(LI);
3812 else if (auto *SI = dyn_cast<StoreInst>(I))
3813 if (auto *LI = dyn_cast<LoadInst>(SI->getValueOperand()))
3814 UnsplittableLoads.insert(LI);
3815 continue;
3816 }
3817 assert(P.endOffset() > S.beginOffset() &&
3818 "Empty or backwards partition!");
3819
3820 // Determine if this is a pre-splittable slice.
3821 if (auto *LI = dyn_cast<LoadInst>(I)) {
3822 assert(!LI->isVolatile() && "Cannot split volatile loads!");
3823
3824 // The load must be used exclusively to store into other pointers for
3825 // us to be able to arbitrarily pre-split it. The stores must also be
3826 // simple to avoid changing semantics.
3827 auto IsLoadSimplyStored = [](LoadInst *LI) {
3828 for (User *LU : LI->users()) {
3829 auto *SI = dyn_cast<StoreInst>(LU);
3830 if (!SI || !SI->isSimple())
3831 return false;
3832 }
3833 return true;
3834 };
3835 if (!IsLoadSimplyStored(LI)) {
3836 UnsplittableLoads.insert(LI);
3837 continue;
3838 }
3839
3840 Loads.push_back(LI);
3841 } else if (auto *SI = dyn_cast<StoreInst>(I)) {
3842 if (S.getUse() != &SI->getOperandUse(SI->getPointerOperandIndex()))
3843 // Skip stores *of* pointers. FIXME: This shouldn't even be possible!
3844 continue;
3845 auto *StoredLoad = dyn_cast<LoadInst>(SI->getValueOperand());
3846 if (!StoredLoad || !StoredLoad->isSimple())
3847 continue;
3848 assert(!SI->isVolatile() && "Cannot split volatile stores!");
3849
3850 Stores.push_back(SI);
3851 } else {
3852 // Other uses cannot be pre-split.
3853 continue;
3854 }
3855
3856 // Record the initial split.
3857 LLVM_DEBUG(dbgs() << " Candidate: " << *I << "\n");
3858 auto &Offsets = SplitOffsetsMap[I];
3859 assert(Offsets.Splits.empty() &&
3860 "Should not have splits the first time we see an instruction!");
3861 Offsets.S = &S;
3862 Offsets.Splits.push_back(P.endOffset() - S.beginOffset());
3863 }
3864
3865 // Now scan the already split slices, and add a split for any of them which
3866 // we're going to pre-split.
3867 for (Slice *S : P.splitSliceTails()) {
3868 auto SplitOffsetsMapI =
3869 SplitOffsetsMap.find(cast<Instruction>(S->getUse()->getUser()));
3870 if (SplitOffsetsMapI == SplitOffsetsMap.end())
3871 continue;
3872 auto &Offsets = SplitOffsetsMapI->second;
3873
3874 assert(Offsets.S == S && "Found a mismatched slice!");
3875 assert(!Offsets.Splits.empty() &&
3876 "Cannot have an empty set of splits on the second partition!");
3877 assert(Offsets.Splits.back() ==
3878 P.beginOffset() - Offsets.S->beginOffset() &&
3879 "Previous split does not end where this one begins!");
3880
3881 // Record each split. The last partition's end isn't needed as the size
3882 // of the slice dictates that.
3883 if (S->endOffset() > P.endOffset())
3884 Offsets.Splits.push_back(P.endOffset() - Offsets.S->beginOffset());
3885 }
3886 }
3887
3888 // We may have split loads where some of their stores are split stores. For
3889 // such loads and stores, we can only pre-split them if their splits exactly
3890 // match relative to their starting offset. We have to verify this prior to
3891 // any rewriting.
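  // For example, a load with Splits == {4, 8} can only be pre-split together
  // with a store whose Splits are also {4, 8}; if the store's splits were,
  // say, {6}, both the load and the store are dropped from the candidate
  // lists below.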
3892 llvm::erase_if(Stores, [&UnsplittableLoads, &SplitOffsetsMap](StoreInst *SI) {
3893 // Lookup the load we are storing in our map of split
3894 // offsets.
3895 auto *LI = cast<LoadInst>(SI->getValueOperand());
3896 // If it was completely unsplittable, then we're done,
3897 // and this store can't be pre-split.
3898 if (UnsplittableLoads.count(LI))
3899 return true;
3900
3901 auto LoadOffsetsI = SplitOffsetsMap.find(LI);
3902 if (LoadOffsetsI == SplitOffsetsMap.end())
3903 return false; // Unrelated loads are definitely safe.
3904 auto &LoadOffsets = LoadOffsetsI->second;
3905
3906 // Now lookup the store's offsets.
3907 auto &StoreOffsets = SplitOffsetsMap[SI];
3908
3909 // If the relative offsets of each split in the load and
3910 // store match exactly, then we can split them and we
3911 // don't need to remove them here.
3912 if (LoadOffsets.Splits == StoreOffsets.Splits)
3913 return false;
3914
3915 LLVM_DEBUG(dbgs() << " Mismatched splits for load and store:\n"
3916 << " " << *LI << "\n"
3917 << " " << *SI << "\n");
3918
3919 // We've found a store and load that we need to split
3920 // with mismatched relative splits. Just give up on them
3921 // and remove both instructions from our list of
3922 // candidates.
3923 UnsplittableLoads.insert(LI);
3924 return true;
3925 });
3926 // Now we have to go *back* through all the stores, because a later store may
3927 // have caused an earlier store's load to become unsplittable and if it is
3928 // unsplittable for the later store, then we can't rely on it being split in
3929 // the earlier store either.
3930 llvm::erase_if(Stores, [&UnsplittableLoads](StoreInst *SI) {
3931 auto *LI = cast<LoadInst>(SI->getValueOperand());
3932 return UnsplittableLoads.count(LI);
3933 });
3934 // Once we've established all the loads that can't be split for some reason,
3935 // filter any that made it into our list out.
3936 llvm::erase_if(Loads, [&UnsplittableLoads](LoadInst *LI) {
3937 return UnsplittableLoads.count(LI);
3938 });
3939
3940 // If no loads or stores are left, there is no pre-splitting to be done for
3941 // this alloca.
3942 if (Loads.empty() && Stores.empty())
3943 return false;
3944
3945 // From here on, we can't fail and will be building new accesses, so rig up
3946 // an IR builder.
3947 IRBuilderTy IRB(&AI);
3948
3949 // Collect the new slices which we will merge into the alloca slices.
3950 SmallVector<Slice, 4> NewSlices;
3951
3952 // Track any allocas we end up splitting loads and stores for so we iterate
3953 // on them.
3954 SmallPtrSet<AllocaInst *, 4> ResplitPromotableAllocas;
3955
3956 // At this point, we have collected all of the loads and stores we can
3957 // pre-split, and the specific splits needed for them. We actually do the
3958 // splitting in a specific order in order to handle when one of the loads in
3959   // splitting in a specific order so we can handle the case where one of the loads is
3960 //
3961 // First, we rewrite all of the split loads, and just accumulate each split
3962 // load in a parallel structure. We also build the slices for them and append
3963 // them to the alloca slices.
3964 SmallDenseMap<LoadInst *, std::vector<LoadInst *>, 1> SplitLoadsMap;
3965 std::vector<LoadInst *> SplitLoads;
3966 const DataLayout &DL = AI.getModule()->getDataLayout();
3967 for (LoadInst *LI : Loads) {
3968 SplitLoads.clear();
3969
3970 auto &Offsets = SplitOffsetsMap[LI];
3971 unsigned SliceSize = Offsets.S->endOffset() - Offsets.S->beginOffset();
3972 assert(LI->getType()->getIntegerBitWidth() % 8 == 0 &&
3973 "Load must have type size equal to store size");
3974 assert(LI->getType()->getIntegerBitWidth() / 8 >= SliceSize &&
3975 "Load must be >= slice size");
3976
3977 uint64_t BaseOffset = Offsets.S->beginOffset();
3978 assert(BaseOffset + SliceSize > BaseOffset &&
3979 "Cannot represent alloca access size using 64-bit integers!");
3980
3981 Instruction *BasePtr = cast<Instruction>(LI->getPointerOperand());
3982 IRB.SetInsertPoint(LI);
3983
3984 LLVM_DEBUG(dbgs() << " Splitting load: " << *LI << "\n");
3985
3986 uint64_t PartOffset = 0, PartSize = Offsets.Splits.front();
3987 int Idx = 0, Size = Offsets.Splits.size();
3988 for (;;) {
3989 auto *PartTy = Type::getIntNTy(LI->getContext(), PartSize * 8);
3990 auto AS = LI->getPointerAddressSpace();
3991 auto *PartPtrTy = PartTy->getPointerTo(AS);
3992 LoadInst *PLoad = IRB.CreateAlignedLoad(
3993 PartTy,
3994 getAdjustedPtr(IRB, DL, BasePtr,
3995 APInt(DL.getIndexSizeInBits(AS), PartOffset),
3996 PartPtrTy, BasePtr->getName() + "."),
3997 getAdjustedAlignment(LI, PartOffset),
3998 /*IsVolatile*/ false, LI->getName());
3999 PLoad->copyMetadata(*LI, {LLVMContext::MD_mem_parallel_loop_access,
4000 LLVMContext::MD_access_group});
4001
4002 // Append this load onto the list of split loads so we can find it later
4003 // to rewrite the stores.
4004 SplitLoads.push_back(PLoad);
4005
4006 // Now build a new slice for the alloca.
4007 NewSlices.push_back(
4008 Slice(BaseOffset + PartOffset, BaseOffset + PartOffset + PartSize,
4009 &PLoad->getOperandUse(PLoad->getPointerOperandIndex()),
4010 /*IsSplittable*/ false));
4011 LLVM_DEBUG(dbgs() << " new slice [" << NewSlices.back().beginOffset()
4012 << ", " << NewSlices.back().endOffset()
4013 << "): " << *PLoad << "\n");
4014
4015 // See if we've handled all the splits.
4016 if (Idx >= Size)
4017 break;
4018
4019 // Setup the next partition.
4020 PartOffset = Offsets.Splits[Idx];
4021 ++Idx;
4022 PartSize = (Idx < Size ? Offsets.Splits[Idx] : SliceSize) - PartOffset;
4023 }
4024
4025 // Now that we have the split loads, do the slow walk over all uses of the
4026 // load and rewrite them as split stores, or save the split loads to use
4027 // below if the store is going to be split there anyways.
4028 bool DeferredStores = false;
4029 for (User *LU : LI->users()) {
4030 StoreInst *SI = cast<StoreInst>(LU);
4031 if (!Stores.empty() && SplitOffsetsMap.count(SI)) {
4032 DeferredStores = true;
4033 LLVM_DEBUG(dbgs() << " Deferred splitting of store: " << *SI
4034 << "\n");
4035 continue;
4036 }
4037
4038 Value *StoreBasePtr = SI->getPointerOperand();
4039 IRB.SetInsertPoint(SI);
4040
4041 LLVM_DEBUG(dbgs() << " Splitting store of load: " << *SI << "\n");
4042
4043 for (int Idx = 0, Size = SplitLoads.size(); Idx < Size; ++Idx) {
4044 LoadInst *PLoad = SplitLoads[Idx];
4045 uint64_t PartOffset = Idx == 0 ? 0 : Offsets.Splits[Idx - 1];
4046 auto *PartPtrTy =
4047 PLoad->getType()->getPointerTo(SI->getPointerAddressSpace());
4048
4049 auto AS = SI->getPointerAddressSpace();
4050 StoreInst *PStore = IRB.CreateAlignedStore(
4051 PLoad,
4052 getAdjustedPtr(IRB, DL, StoreBasePtr,
4053 APInt(DL.getIndexSizeInBits(AS), PartOffset),
4054 PartPtrTy, StoreBasePtr->getName() + "."),
4055 getAdjustedAlignment(SI, PartOffset),
4056 /*IsVolatile*/ false);
4057 PStore->copyMetadata(*SI, {LLVMContext::MD_mem_parallel_loop_access,
4058 LLVMContext::MD_access_group});
4059 LLVM_DEBUG(dbgs() << " +" << PartOffset << ":" << *PStore << "\n");
4060 }
4061
4062 // We want to immediately iterate on any allocas impacted by splitting
4063 // this store, and we have to track any promotable alloca (indicated by
4064 // a direct store) as needing to be resplit because it is no longer
4065 // promotable.
4066 if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(StoreBasePtr)) {
4067 ResplitPromotableAllocas.insert(OtherAI);
4068 Worklist.insert(OtherAI);
4069 } else if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(
4070 StoreBasePtr->stripInBoundsOffsets())) {
4071 Worklist.insert(OtherAI);
4072 }
4073
4074 // Mark the original store as dead.
4075 DeadInsts.push_back(SI);
4076 }
4077
4078 // Save the split loads if there are deferred stores among the users.
4079 if (DeferredStores)
4080 SplitLoadsMap.insert(std::make_pair(LI, std::move(SplitLoads)));
4081
4082 // Mark the original load as dead and kill the original slice.
4083 DeadInsts.push_back(LI);
4084 Offsets.S->kill();
4085 }
4086
4087 // Second, we rewrite all of the split stores. At this point, we know that
4088 // all loads from this alloca have been split already. For stores of such
4089 // loads, we can simply look up the pre-existing split loads. For stores of
4090 // other loads, we split those loads first and then write split stores of
4091 // them.
4092 for (StoreInst *SI : Stores) {
4093 auto *LI = cast<LoadInst>(SI->getValueOperand());
4094 IntegerType *Ty = cast<IntegerType>(LI->getType());
4095 assert(Ty->getBitWidth() % 8 == 0);
4096 uint64_t StoreSize = Ty->getBitWidth() / 8;
4097 assert(StoreSize > 0 && "Cannot have a zero-sized integer store!");
4098
4099 auto &Offsets = SplitOffsetsMap[SI];
4100 assert(StoreSize == Offsets.S->endOffset() - Offsets.S->beginOffset() &&
4101 "Slice size should always match load size exactly!");
4102 uint64_t BaseOffset = Offsets.S->beginOffset();
4103 assert(BaseOffset + StoreSize > BaseOffset &&
4104 "Cannot represent alloca access size using 64-bit integers!");
4105
4106 Value *LoadBasePtr = LI->getPointerOperand();
4107 Instruction *StoreBasePtr = cast<Instruction>(SI->getPointerOperand());
4108
4109 LLVM_DEBUG(dbgs() << " Splitting store: " << *SI << "\n");
4110
4111 // Check whether we have an already split load.
4112 auto SplitLoadsMapI = SplitLoadsMap.find(LI);
4113 std::vector<LoadInst *> *SplitLoads = nullptr;
4114 if (SplitLoadsMapI != SplitLoadsMap.end()) {
4115 SplitLoads = &SplitLoadsMapI->second;
4116 assert(SplitLoads->size() == Offsets.Splits.size() + 1 &&
4117 "Too few split loads for the number of splits in the store!");
4118 } else {
4119 LLVM_DEBUG(dbgs() << " of load: " << *LI << "\n");
4120 }
4121
4122 uint64_t PartOffset = 0, PartSize = Offsets.Splits.front();
4123 int Idx = 0, Size = Offsets.Splits.size();
4124 for (;;) {
4125 auto *PartTy = Type::getIntNTy(Ty->getContext(), PartSize * 8);
4126 auto *LoadPartPtrTy = PartTy->getPointerTo(LI->getPointerAddressSpace());
4127 auto *StorePartPtrTy = PartTy->getPointerTo(SI->getPointerAddressSpace());
4128
4129 // Either lookup a split load or create one.
4130 LoadInst *PLoad;
4131 if (SplitLoads) {
4132 PLoad = (*SplitLoads)[Idx];
4133 } else {
4134 IRB.SetInsertPoint(LI);
4135 auto AS = LI->getPointerAddressSpace();
4136 PLoad = IRB.CreateAlignedLoad(
4137 PartTy,
4138 getAdjustedPtr(IRB, DL, LoadBasePtr,
4139 APInt(DL.getIndexSizeInBits(AS), PartOffset),
4140 LoadPartPtrTy, LoadBasePtr->getName() + "."),
4141 getAdjustedAlignment(LI, PartOffset),
4142 /*IsVolatile*/ false, LI->getName());
4143 PLoad->copyMetadata(*LI, {LLVMContext::MD_mem_parallel_loop_access,
4144 LLVMContext::MD_access_group});
4145 }
4146
4147 // And store this partition.
4148 IRB.SetInsertPoint(SI);
4149 auto AS = SI->getPointerAddressSpace();
4150 StoreInst *PStore = IRB.CreateAlignedStore(
4151 PLoad,
4152 getAdjustedPtr(IRB, DL, StoreBasePtr,
4153 APInt(DL.getIndexSizeInBits(AS), PartOffset),
4154 StorePartPtrTy, StoreBasePtr->getName() + "."),
4155 getAdjustedAlignment(SI, PartOffset),
4156 /*IsVolatile*/ false);
4157 PStore->copyMetadata(*SI, {LLVMContext::MD_mem_parallel_loop_access,
4158 LLVMContext::MD_access_group});
4159
4160 // Now build a new slice for the alloca.
4161 NewSlices.push_back(
4162 Slice(BaseOffset + PartOffset, BaseOffset + PartOffset + PartSize,
4163 &PStore->getOperandUse(PStore->getPointerOperandIndex()),
4164 /*IsSplittable*/ false));
4165 LLVM_DEBUG(dbgs() << " new slice [" << NewSlices.back().beginOffset()
4166 << ", " << NewSlices.back().endOffset()
4167 << "): " << *PStore << "\n");
4168 if (!SplitLoads) {
4169 LLVM_DEBUG(dbgs() << " of split load: " << *PLoad << "\n");
4170 }
4171
4172 // See if we've finished all the splits.
4173 if (Idx >= Size)
4174 break;
4175
4176 // Setup the next partition.
4177 PartOffset = Offsets.Splits[Idx];
4178 ++Idx;
4179 PartSize = (Idx < Size ? Offsets.Splits[Idx] : StoreSize) - PartOffset;
4180 }
4181
4182 // We want to immediately iterate on any allocas impacted by splitting
4183 // this load, which is only relevant if it isn't a load of this alloca and
4184 // thus we didn't already split the loads above. We also have to keep track
4185 // of any promotable allocas we split loads on as they can no longer be
4186 // promoted.
4187 if (!SplitLoads) {
4188 if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(LoadBasePtr)) {
4189 assert(OtherAI != &AI && "We can't re-split our own alloca!");
4190 ResplitPromotableAllocas.insert(OtherAI);
4191 Worklist.insert(OtherAI);
4192 } else if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(
4193 LoadBasePtr->stripInBoundsOffsets())) {
4194 assert(OtherAI != &AI && "We can't re-split our own alloca!");
4195 Worklist.insert(OtherAI);
4196 }
4197 }
4198
4199 // Mark the original store as dead now that we've split it up and kill its
4200 // slice. Note that we leave the original load in place unless this store
4201 // was its only use. It may in turn be split up if it is an alloca load
4202 // for some other alloca, but it may be a normal load. This may introduce
4203 // redundant loads, but where those can be merged the rest of the optimizer
4204     // should handle the merging, and this uncovers SSA splits, which is more
4205 // important. In practice, the original loads will almost always be fully
4206 // split and removed eventually, and the splits will be merged by any
4207 // trivial CSE, including instcombine.
4208 if (LI->hasOneUse()) {
4209 assert(*LI->user_begin() == SI && "Single use isn't this store!");
4210 DeadInsts.push_back(LI);
4211 }
4212 DeadInsts.push_back(SI);
4213 Offsets.S->kill();
4214 }
4215
4216   // Remove the killed slices that have been pre-split.
4217 llvm::erase_if(AS, [](const Slice &S) { return S.isDead(); });
4218
4219 // Insert our new slices. This will sort and merge them into the sorted
4220 // sequence.
4221 AS.insert(NewSlices);
4222
4223 LLVM_DEBUG(dbgs() << " Pre-split slices:\n");
4224 #ifndef NDEBUG
4225 for (auto I = AS.begin(), E = AS.end(); I != E; ++I)
4226 LLVM_DEBUG(AS.print(dbgs(), I, " "));
4227 #endif
4228
4229   // Finally, don't try to promote any allocas that now require re-splitting.
4230 // They have already been added to the worklist above.
4231 llvm::erase_if(PromotableAllocas, [&](AllocaInst *AI) {
4232 return ResplitPromotableAllocas.count(AI);
4233 });
4234
4235 return true;
4236 }
4237
4238 /// Rewrite an alloca partition's users.
4239 ///
4240 /// This routine drives both of the rewriting goals of the SROA pass. It tries
4241 /// to rewrite uses of an alloca partition to be conducive for SSA value
4242 /// promotion. If the partition needs a new, more refined alloca, this will
4243 /// build that new alloca, preserving as much type information as possible, and
4244 /// rewrite the uses of the old alloca to point at the new one and have the
4245 /// appropriate new offsets. It also evaluates how successful the rewrite was
4246 /// at enabling promotion and if it was successful queues the alloca to be
4247 /// promoted.
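///
/// As a simplified illustration (hypothetical IR), an alloca such as
///   %a = alloca { i32, i32 }
/// whose two fields are only ever loaded and stored independently would
/// typically be rewritten into two partitions, each backed by its own
///   %a.sroa.N = alloca i32
/// that mem2reg can promote on a later iteration.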
4248 AllocaInst *SROAPass::rewritePartition(AllocaInst &AI, AllocaSlices &AS,
4249 Partition &P) {
4250 // Try to compute a friendly type for this partition of the alloca. This
4251 // won't always succeed, in which case we fall back to a legal integer type
4252 // or an i8 array of an appropriate size.
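  // As an illustrative (not exhaustive) walk through the fallbacks: an 8-byte
  // partition whose uses all operate on i64 keeps i64; a partition that lines
  // up with a field of the original aggregate keeps that field's type; mixed
  // integer uses typically fall back to a legal integer such as i64 on a
  // 64-bit target; anything else becomes an [N x i8] array of the partition
  // size.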
4253 Type *SliceTy = nullptr;
4254 const DataLayout &DL = AI.getModule()->getDataLayout();
4255 std::pair<Type *, IntegerType *> CommonUseTy =
4256 findCommonType(P.begin(), P.end(), P.endOffset());
4257 // Do all uses operate on the same type?
4258 if (CommonUseTy.first)
4259 if (DL.getTypeAllocSize(CommonUseTy.first).getFixedSize() >= P.size())
4260 SliceTy = CommonUseTy.first;
4261 // If not, can we find an appropriate subtype in the original allocated type?
4262 if (!SliceTy)
4263 if (Type *TypePartitionTy = getTypePartition(DL, AI.getAllocatedType(),
4264 P.beginOffset(), P.size()))
4265 SliceTy = TypePartitionTy;
4266 // If still not, can we use the largest bitwidth integer type used?
4267 if (!SliceTy && CommonUseTy.second)
4268 if (DL.getTypeAllocSize(CommonUseTy.second).getFixedSize() >= P.size())
4269 SliceTy = CommonUseTy.second;
4270 if ((!SliceTy || (SliceTy->isArrayTy() &&
4271 SliceTy->getArrayElementType()->isIntegerTy())) &&
4272 DL.isLegalInteger(P.size() * 8))
4273 SliceTy = Type::getIntNTy(*C, P.size() * 8);
4274 if (!SliceTy)
4275 SliceTy = ArrayType::get(Type::getInt8Ty(*C), P.size());
4276 assert(DL.getTypeAllocSize(SliceTy).getFixedSize() >= P.size());
4277
4278 bool IsIntegerPromotable = isIntegerWideningViable(P, SliceTy, DL);
4279
4280 VectorType *VecTy =
4281 IsIntegerPromotable ? nullptr : isVectorPromotionViable(P, DL);
4282 if (VecTy)
4283 SliceTy = VecTy;
4284
4285 // Check for the case where we're going to rewrite to a new alloca of the
4286 // exact same type as the original, and with the same access offsets. In that
4287 // case, re-use the existing alloca, but still run through the rewriter to
4288 // perform phi and select speculation.
4289 // P.beginOffset() can be non-zero even with the same type in a case with
4290 // out-of-bounds access (e.g. @PR35657 function in SROA/basictest.ll).
4291 AllocaInst *NewAI;
4292 if (SliceTy == AI.getAllocatedType() && P.beginOffset() == 0) {
4293 NewAI = &AI;
4294 // FIXME: We should be able to bail at this point with "nothing changed".
4295 // FIXME: We might want to defer PHI speculation until after here.
4296 // FIXME: return nullptr;
4297 } else {
4298 // Make sure the alignment is compatible with P.beginOffset().
4299 const Align Alignment = commonAlignment(AI.getAlign(), P.beginOffset());
4300 // If we will get at least this much alignment from the type alone, leave
4301 // the alloca's alignment unconstrained.
4302 const bool IsUnconstrained = Alignment <= DL.getABITypeAlign(SliceTy);
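    // For instance, with an original alloca aligned to 16 and a partition
    // beginning at offset 8, the common alignment is 8; if the slice type's
    // ABI alignment already guarantees at least that, we can leave the new
    // alloca with the type's preferred alignment instead.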
4303 NewAI = new AllocaInst(
4304 SliceTy, AI.getType()->getAddressSpace(), nullptr,
4305 IsUnconstrained ? DL.getPrefTypeAlign(SliceTy) : Alignment,
4306 AI.getName() + ".sroa." + Twine(P.begin() - AS.begin()), &AI);
4307 // Copy the old AI debug location over to the new one.
4308 NewAI->setDebugLoc(AI.getDebugLoc());
4309 ++NumNewAllocas;
4310 }
4311
4312 LLVM_DEBUG(dbgs() << "Rewriting alloca partition "
4313 << "[" << P.beginOffset() << "," << P.endOffset()
4314 << ") to: " << *NewAI << "\n");
4315
4316 // Track the high watermark on the worklist as it is only relevant for
4317 // promoted allocas. We will reset it to this point if the alloca is not in
4318 // fact scheduled for promotion.
4319 unsigned PPWOldSize = PostPromotionWorklist.size();
4320 unsigned NumUses = 0;
4321 SmallSetVector<PHINode *, 8> PHIUsers;
4322 SmallSetVector<SelectInst *, 8> SelectUsers;
4323
4324 AllocaSliceRewriter Rewriter(DL, AS, *this, AI, *NewAI, P.beginOffset(),
4325 P.endOffset(), IsIntegerPromotable, VecTy,
4326 PHIUsers, SelectUsers);
4327 bool Promotable = true;
4328 for (Slice *S : P.splitSliceTails()) {
4329 Promotable &= Rewriter.visit(S);
4330 ++NumUses;
4331 }
4332 for (Slice &S : P) {
4333 Promotable &= Rewriter.visit(&S);
4334 ++NumUses;
4335 }
4336
4337 NumAllocaPartitionUses += NumUses;
4338 MaxUsesPerAllocaPartition.updateMax(NumUses);
4339
4340 // Now that we've processed all the slices in the new partition, check if any
4341 // PHIs or Selects would block promotion.
4342 for (PHINode *PHI : PHIUsers)
4343 if (!isSafePHIToSpeculate(*PHI)) {
4344 Promotable = false;
4345 PHIUsers.clear();
4346 SelectUsers.clear();
4347 break;
4348 }
4349
4350 for (SelectInst *Sel : SelectUsers)
4351 if (!isSafeSelectToSpeculate(*Sel)) {
4352 Promotable = false;
4353 PHIUsers.clear();
4354 SelectUsers.clear();
4355 break;
4356 }
4357
4358 if (Promotable) {
4359 for (Use *U : AS.getDeadUsesIfPromotable()) {
4360 auto *OldInst = dyn_cast<Instruction>(U->get());
4361 Value::dropDroppableUse(*U);
4362 if (OldInst)
4363 if (isInstructionTriviallyDead(OldInst))
4364 DeadInsts.push_back(OldInst);
4365 }
4366 if (PHIUsers.empty() && SelectUsers.empty()) {
4367 // Promote the alloca.
4368 PromotableAllocas.push_back(NewAI);
4369 } else {
4370 // If we have either PHIs or Selects to speculate, add them to those
4371       // worklists and re-queue the new alloca so that we promote it on the
4372 // next iteration.
4373 for (PHINode *PHIUser : PHIUsers)
4374 SpeculatablePHIs.insert(PHIUser);
4375 for (SelectInst *SelectUser : SelectUsers)
4376 SpeculatableSelects.insert(SelectUser);
4377 Worklist.insert(NewAI);
4378 }
4379 } else {
4380 // Drop any post-promotion work items if promotion didn't happen.
4381 while (PostPromotionWorklist.size() > PPWOldSize)
4382 PostPromotionWorklist.pop_back();
4383
4384 // We couldn't promote and we didn't create a new partition, nothing
4385 // happened.
4386 if (NewAI == &AI)
4387 return nullptr;
4388
4389 // If we can't promote the alloca, iterate on it to check for new
4390 // refinements exposed by splitting the current alloca. Don't iterate on an
4391 // alloca which didn't actually change and didn't get promoted.
4392 Worklist.insert(NewAI);
4393 }
4394
4395 return NewAI;
4396 }
4397
4398 /// Walks the slices of an alloca and forms partitions based on them,
4399 /// rewriting each of their uses.
4400 bool SROAPass::splitAlloca(AllocaInst &AI, AllocaSlices &AS) {
4401 if (AS.begin() == AS.end())
4402 return false;
4403
4404 unsigned NumPartitions = 0;
4405 bool Changed = false;
4406 const DataLayout &DL = AI.getModule()->getDataLayout();
4407
4408 // First try to pre-split loads and stores.
4409 Changed |= presplitLoadsAndStores(AI, AS);
4410
4411 // Now that we have identified any pre-splitting opportunities,
4412 // mark loads and stores unsplittable except for the following case.
4413 // We leave a slice splittable if all other slices are disjoint or fully
4414 // included in the slice, such as whole-alloca loads and stores.
4415 // If we fail to split these during pre-splitting, we want to force them
4416 // to be rewritten into a partition.
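  // As an illustrative example, a splittable load of bytes [2,6) that overlaps
  // an i32 store of bytes [0,4) begins strictly inside that store and is
  // forced unsplittable here, while a whole-alloca load over disjoint i32
  // stores starts and ends on clean boundaries and stays splittable.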
4417 bool IsSorted = true;
4418
4419 uint64_t AllocaSize =
4420 DL.getTypeAllocSize(AI.getAllocatedType()).getFixedSize();
4421 const uint64_t MaxBitVectorSize = 1024;
4422 if (AllocaSize <= MaxBitVectorSize) {
4423 // If a byte boundary is included in any load or store, a slice starting or
4424 // ending at the boundary is not splittable.
4425 SmallBitVector SplittableOffset(AllocaSize + 1, true);
4426 for (Slice &S : AS)
4427 for (unsigned O = S.beginOffset() + 1;
4428 O < S.endOffset() && O < AllocaSize; O++)
4429 SplittableOffset.reset(O);
4430
4431 for (Slice &S : AS) {
4432 if (!S.isSplittable())
4433 continue;
4434
4435 if ((S.beginOffset() > AllocaSize || SplittableOffset[S.beginOffset()]) &&
4436 (S.endOffset() > AllocaSize || SplittableOffset[S.endOffset()]))
4437 continue;
4438
4439 if (isa<LoadInst>(S.getUse()->getUser()) ||
4440 isa<StoreInst>(S.getUse()->getUser())) {
4441 S.makeUnsplittable();
4442 IsSorted = false;
4443 }
4444 }
4445 }
4446 else {
4447 // We only allow whole-alloca splittable loads and stores
4448     // for a large alloca to avoid creating too large a BitVector.
4449 for (Slice &S : AS) {
4450 if (!S.isSplittable())
4451 continue;
4452
4453 if (S.beginOffset() == 0 && S.endOffset() >= AllocaSize)
4454 continue;
4455
4456 if (isa<LoadInst>(S.getUse()->getUser()) ||
4457 isa<StoreInst>(S.getUse()->getUser())) {
4458 S.makeUnsplittable();
4459 IsSorted = false;
4460 }
4461 }
4462 }
4463
4464 if (!IsSorted)
4465 llvm::sort(AS);
4466
4467 /// Describes the allocas introduced by rewritePartition in order to migrate
4468 /// the debug info.
4469 struct Fragment {
4470 AllocaInst *Alloca;
4471 uint64_t Offset;
4472 uint64_t Size;
4473 Fragment(AllocaInst *AI, uint64_t O, uint64_t S)
4474 : Alloca(AI), Offset(O), Size(S) {}
4475 };
4476 SmallVector<Fragment, 4> Fragments;
4477
4478 // Rewrite each partition.
4479 for (auto &P : AS.partitions()) {
4480 if (AllocaInst *NewAI = rewritePartition(AI, AS, P)) {
4481 Changed = true;
4482 if (NewAI != &AI) {
4483 uint64_t SizeOfByte = 8;
4484 uint64_t AllocaSize =
4485 DL.getTypeSizeInBits(NewAI->getAllocatedType()).getFixedSize();
4486 // Don't include any padding.
4487 uint64_t Size = std::min(AllocaSize, P.size() * SizeOfByte);
4488 Fragments.push_back(Fragment(NewAI, P.beginOffset() * SizeOfByte, Size));
4489 }
4490 }
4491 ++NumPartitions;
4492 }
4493
4494 NumAllocaPartitions += NumPartitions;
4495 MaxPartitionsPerAlloca.updateMax(NumPartitions);
4496
4497 // Migrate debug information from the old alloca to the new alloca(s)
4498 // and the individual partitions.
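  // Illustrative example (not from any test case): a dbg.declare describing a
  // 128-bit variable on the original alloca may become one dbg.declare per
  // replacement alloca, each carrying a fragment expression such as
  //   !DIExpression(DW_OP_LLVM_fragment, 0, 64)
  //   !DIExpression(DW_OP_LLVM_fragment, 64, 64)
  // with the offset and size expressed in bits.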
4499 TinyPtrVector<DbgVariableIntrinsic *> DbgDeclares = FindDbgAddrUses(&AI);
4500 for (DbgVariableIntrinsic *DbgDeclare : DbgDeclares) {
4501 auto *Expr = DbgDeclare->getExpression();
4502 DIBuilder DIB(*AI.getModule(), /*AllowUnresolved*/ false);
4503 uint64_t AllocaSize =
4504 DL.getTypeSizeInBits(AI.getAllocatedType()).getFixedSize();
4505 for (auto Fragment : Fragments) {
4506 // Create a fragment expression describing the new partition or reuse AI's
4507 // expression if there is only one partition.
4508 auto *FragmentExpr = Expr;
4509 if (Fragment.Size < AllocaSize || Expr->isFragment()) {
4510 // If this alloca is already a scalar replacement of a larger aggregate,
4511 // Fragment.Offset describes the offset inside the scalar.
4512 auto ExprFragment = Expr->getFragmentInfo();
4513 uint64_t Offset = ExprFragment ? ExprFragment->OffsetInBits : 0;
4514 uint64_t Start = Offset + Fragment.Offset;
4515 uint64_t Size = Fragment.Size;
4516 if (ExprFragment) {
4517 uint64_t AbsEnd =
4518 ExprFragment->OffsetInBits + ExprFragment->SizeInBits;
4519 if (Start >= AbsEnd)
4520 // No need to describe a SROAed padding.
4521 continue;
4522 Size = std::min(Size, AbsEnd - Start);
4523 }
4524 // The new, smaller fragment is stenciled out from the old fragment.
4525 if (auto OrigFragment = FragmentExpr->getFragmentInfo()) {
4526 assert(Start >= OrigFragment->OffsetInBits &&
4527 "new fragment is outside of original fragment");
4528 Start -= OrigFragment->OffsetInBits;
4529 }
4530
4531 // The alloca may be larger than the variable.
4532 auto VarSize = DbgDeclare->getVariable()->getSizeInBits();
4533 if (VarSize) {
4534 if (Size > *VarSize)
4535 Size = *VarSize;
4536 if (Size == 0 || Start + Size > *VarSize)
4537 continue;
4538 }
4539
4540 // Avoid creating a fragment expression that covers the entire variable.
4541 if (!VarSize || *VarSize != Size) {
4542 if (auto E =
4543 DIExpression::createFragmentExpression(Expr, Start, Size))
4544 FragmentExpr = *E;
4545 else
4546 continue;
4547 }
4548 }
4549
4550 // Remove any existing intrinsics on the new alloca describing
4551 // the variable fragment.
4552 for (DbgVariableIntrinsic *OldDII : FindDbgAddrUses(Fragment.Alloca)) {
4553 auto SameVariableFragment = [](const DbgVariableIntrinsic *LHS,
4554 const DbgVariableIntrinsic *RHS) {
4555 return LHS->getVariable() == RHS->getVariable() &&
4556 LHS->getDebugLoc()->getInlinedAt() ==
4557 RHS->getDebugLoc()->getInlinedAt();
4558 };
4559 if (SameVariableFragment(OldDII, DbgDeclare))
4560 OldDII->eraseFromParent();
4561 }
4562
4563 DIB.insertDeclare(Fragment.Alloca, DbgDeclare->getVariable(), FragmentExpr,
4564 DbgDeclare->getDebugLoc(), &AI);
4565 }
4566 }
4567 return Changed;
4568 }
4569
4570 /// Clobber a use with poison, deleting the used value if it becomes dead.
4571 void SROAPass::clobberUse(Use &U) {
4572 Value *OldV = U;
4573   // Replace the use with a poison value.
4574 U = PoisonValue::get(OldV->getType());
4575
4576 // Check for this making an instruction dead. We have to garbage collect
4577 // all the dead instructions to ensure the uses of any alloca end up being
4578 // minimal.
4579 if (Instruction *OldI = dyn_cast<Instruction>(OldV))
4580 if (isInstructionTriviallyDead(OldI)) {
4581 DeadInsts.push_back(OldI);
4582 }
4583 }
4584
4585 /// Analyze an alloca for SROA.
4586 ///
4587 /// This analyzes the alloca to ensure we can reason about it, builds
4588 /// the slices of the alloca, and then hands it off to be split and
4589 /// rewritten as needed.
4590 bool SROAPass::runOnAlloca(AllocaInst &AI) {
4591 LLVM_DEBUG(dbgs() << "SROA alloca: " << AI << "\n");
4592 ++NumAllocasAnalyzed;
4593
4594 // Special case dead allocas, as they're trivial.
4595 if (AI.use_empty()) {
4596 AI.eraseFromParent();
4597 return true;
4598 }
4599 const DataLayout &DL = AI.getModule()->getDataLayout();
4600
4601 // Skip alloca forms that this analysis can't handle.
4602 auto *AT = AI.getAllocatedType();
4603 if (AI.isArrayAllocation() || !AT->isSized() || isa<ScalableVectorType>(AT) ||
4604 DL.getTypeAllocSize(AT).getFixedSize() == 0)
4605 return false;
4606
4607 bool Changed = false;
4608
4609 // First, split any FCA loads and stores touching this alloca to promote
4610 // better splitting and promotion opportunities.
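  // Illustrative sketch (hypothetical IR): an aggregate load such as
  //   %v = load { i32, float }, { i32, float }* %p
  // is decomposed into per-element loads whose results are recombined with
  // insertvalue, so each element access becomes visible as its own slice.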
4611 IRBuilderTy IRB(&AI);
4612 AggLoadStoreRewriter AggRewriter(DL, IRB);
4613 Changed |= AggRewriter.rewrite(AI);
4614
4615 // Build the slices using a recursive instruction-visiting builder.
4616 AllocaSlices AS(DL, AI);
4617 LLVM_DEBUG(AS.print(dbgs()));
4618 if (AS.isEscaped())
4619 return Changed;
4620
4621 // Delete all the dead users of this alloca before splitting and rewriting it.
4622 for (Instruction *DeadUser : AS.getDeadUsers()) {
4623 // Free up everything used by this instruction.
4624 for (Use &DeadOp : DeadUser->operands())
4625 clobberUse(DeadOp);
4626
4627 // Now replace the uses of this instruction.
4628 DeadUser->replaceAllUsesWith(PoisonValue::get(DeadUser->getType()));
4629
4630 // And mark it for deletion.
4631 DeadInsts.push_back(DeadUser);
4632 Changed = true;
4633 }
4634 for (Use *DeadOp : AS.getDeadOperands()) {
4635 clobberUse(*DeadOp);
4636 Changed = true;
4637 }
4638
4639 // No slices to split. Leave the dead alloca for a later pass to clean up.
4640 if (AS.begin() == AS.end())
4641 return Changed;
4642
4643 Changed |= splitAlloca(AI, AS);
4644
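  // Both forms of speculation turn a load through a PHI'd or selected pointer
  // into loads of the incoming pointers. Roughly (hypothetical IR):
  //   %p = select i1 %c, i32* %a, i32* %b
  //   %v = load i32, i32* %p
  // becomes
  //   %v.a = load i32, i32* %a
  //   %v.b = load i32, i32* %b
  //   %v = select i1 %c, i32 %v.a, i32 %v.b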
4645 LLVM_DEBUG(dbgs() << " Speculating PHIs\n");
4646 while (!SpeculatablePHIs.empty())
4647 speculatePHINodeLoads(IRB, *SpeculatablePHIs.pop_back_val());
4648
4649 LLVM_DEBUG(dbgs() << " Speculating Selects\n");
4650 while (!SpeculatableSelects.empty())
4651 speculateSelectInstLoads(IRB, *SpeculatableSelects.pop_back_val());
4652
4653 return Changed;
4654 }
4655
4656 /// Delete the dead instructions accumulated in this run.
4657 ///
4658 /// Recursively deletes the dead instructions we've accumulated. This is done
4659 /// at the very end to maximize locality of the recursive delete and to
4660 /// minimize the problems of invalidated instruction pointers as such pointers
4661 /// are used heavily in the intermediate stages of the algorithm.
4662 ///
4663 /// We also record the alloca instructions deleted here so that they aren't
4664 /// subsequently handed to mem2reg to promote.
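///
/// For example, deleting a dead store can leave the GEP feeding its pointer
/// operand without any remaining uses; nulling the operand here queues that
/// GEP as well, so the whole chain is reclaimed in one pass.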
4665 bool SROAPass::deleteDeadInstructions(
4666 SmallPtrSetImpl<AllocaInst *> &DeletedAllocas) {
4667 bool Changed = false;
4668 while (!DeadInsts.empty()) {
4669 Instruction *I = dyn_cast_or_null<Instruction>(DeadInsts.pop_back_val());
4670 if (!I) continue;
4671 LLVM_DEBUG(dbgs() << "Deleting dead instruction: " << *I << "\n");
4672
4673 // If the instruction is an alloca, find the possible dbg.declare connected
4674 // to it, and remove it too. We must do this before calling RAUW or we will
4675 // not be able to find it.
4676 if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
4677 DeletedAllocas.insert(AI);
4678 for (DbgVariableIntrinsic *OldDII : FindDbgAddrUses(AI))
4679 OldDII->eraseFromParent();
4680 }
4681
4682 I->replaceAllUsesWith(UndefValue::get(I->getType()));
4683
4684 for (Use &Operand : I->operands())
4685 if (Instruction *U = dyn_cast<Instruction>(Operand)) {
4686 // Zero out the operand and see if it becomes trivially dead.
4687 Operand = nullptr;
4688 if (isInstructionTriviallyDead(U))
4689 DeadInsts.push_back(U);
4690 }
4691
4692 ++NumDeleted;
4693 I->eraseFromParent();
4694 Changed = true;
4695 }
4696 return Changed;
4697 }
4698
4699 /// Promote the allocas, using the best available technique.
4700 ///
4701 /// This attempts to promote whatever allocas have been identified as viable in
4702 /// the PromotableAllocas list. If that list is empty, there is nothing to do.
4703 /// This function returns whether any promotion occurred.
4704 bool SROAPass::promoteAllocas(Function &F) {
4705 if (PromotableAllocas.empty())
4706 return false;
4707
4708 NumPromoted += PromotableAllocas.size();
4709
4710 LLVM_DEBUG(dbgs() << "Promoting allocas with mem2reg...\n");
4711 PromoteMemToReg(PromotableAllocas, *DT, AC);
4712 PromotableAllocas.clear();
4713 return true;
4714 }
4715
4716 PreservedAnalyses SROAPass::runImpl(Function &F, DominatorTree &RunDT,
4717 AssumptionCache &RunAC) {
4718 LLVM_DEBUG(dbgs() << "SROA function: " << F.getName() << "\n");
4719 C = &F.getContext();
4720 DT = &RunDT;
4721 AC = &RunAC;
4722
4723 BasicBlock &EntryBB = F.getEntryBlock();
4724 for (BasicBlock::iterator I = EntryBB.begin(), E = std::prev(EntryBB.end());
4725 I != E; ++I) {
4726 if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
4727 if (isa<ScalableVectorType>(AI->getAllocatedType())) {
4728 if (isAllocaPromotable(AI))
4729 PromotableAllocas.push_back(AI);
4730 } else {
4731 Worklist.insert(AI);
4732 }
4733 }
4734 }
4735
4736 bool Changed = false;
4737 // A set of deleted alloca instruction pointers which should be removed from
4738 // the list of promotable allocas.
4739 SmallPtrSet<AllocaInst *, 4> DeletedAllocas;
4740
4741 do {
4742 while (!Worklist.empty()) {
4743 Changed |= runOnAlloca(*Worklist.pop_back_val());
4744 Changed |= deleteDeadInstructions(DeletedAllocas);
4745
4746 // Remove the deleted allocas from various lists so that we don't try to
4747 // continue processing them.
4748 if (!DeletedAllocas.empty()) {
4749 auto IsInSet = [&](AllocaInst *AI) { return DeletedAllocas.count(AI); };
4750 Worklist.remove_if(IsInSet);
4751 PostPromotionWorklist.remove_if(IsInSet);
4752 llvm::erase_if(PromotableAllocas, IsInSet);
4753 DeletedAllocas.clear();
4754 }
4755 }
4756
4757 Changed |= promoteAllocas(F);
4758
4759 Worklist = PostPromotionWorklist;
4760 PostPromotionWorklist.clear();
4761 } while (!Worklist.empty());
4762
4763 if (!Changed)
4764 return PreservedAnalyses::all();
4765
4766 PreservedAnalyses PA;
4767 PA.preserveSet<CFGAnalyses>();
4768 return PA;
4769 }
4770
4771 PreservedAnalyses SROAPass::run(Function &F, FunctionAnalysisManager &AM) {
4772 return runImpl(F, AM.getResult<DominatorTreeAnalysis>(F),
4773 AM.getResult<AssumptionAnalysis>(F));
4774 }
4775
4776 /// A legacy pass for the legacy pass manager that wraps the \c SROA pass.
4777 ///
4778 /// This is in the llvm namespace purely to allow it to be a friend of the \c
4779 /// SROA pass.
4780 class llvm::sroa::SROALegacyPass : public FunctionPass {
4781 /// The SROA implementation.
4782 SROAPass Impl;
4783
4784 public:
4785 static char ID;
4786
4787   SROALegacyPass() : FunctionPass(ID) {
4788 initializeSROALegacyPassPass(*PassRegistry::getPassRegistry());
4789 }
4790
4791   bool runOnFunction(Function &F) override {
4792 if (skipFunction(F))
4793 return false;
4794
4795 auto PA = Impl.runImpl(
4796 F, getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
4797 getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
4798 return !PA.areAllPreserved();
4799 }
4800
4801   void getAnalysisUsage(AnalysisUsage &AU) const override {
4802 AU.addRequired<AssumptionCacheTracker>();
4803 AU.addRequired<DominatorTreeWrapperPass>();
4804 AU.addPreserved<GlobalsAAWrapperPass>();
4805 AU.setPreservesCFG();
4806 }
4807
4808   StringRef getPassName() const override { return "SROA"; }
4809 };
4810
4811 char SROALegacyPass::ID = 0;
4812
4813 FunctionPass *llvm::createSROAPass() { return new SROALegacyPass(); }
4814
4815 INITIALIZE_PASS_BEGIN(SROALegacyPass, "sroa",
4816 "Scalar Replacement Of Aggregates", false, false)
4817 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
4818 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
4819 INITIALIZE_PASS_END(SROALegacyPass, "sroa", "Scalar Replacement Of Aggregates",
4820 false, false)
4821