1 //===- SROA.cpp - Scalar Replacement Of Aggregates ------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This transformation implements the well known scalar replacement of
10 /// aggregates transformation. It tries to identify promotable elements of an
11 /// aggregate alloca, and promote them to registers. It will also try to
12 /// convert uses of an element (or set of elements) of an alloca into a vector
13 /// or bitfield-style integer scalar if appropriate.
14 ///
15 /// It works to do this with minimal slicing of the alloca so that regions
16 /// which are merely transferred in and out of external memory remain unchanged
17 /// and are not decomposed to scalar code.
18 ///
19 /// Because this also performs alloca promotion, it can be thought of as also
20 /// serving the purpose of SSA formation. The algorithm iterates on the
21 /// function until all opportunities for promotion have been realized.
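///
/// For example (illustrative), a two-field aggregate such as
///   %pair = alloca { i32, float }
/// whose fields are only ever loaded and stored individually can be replaced
/// by two independent scalar values and promoted out of memory entirely.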
22 ///
23 //===----------------------------------------------------------------------===//
24 
25 #include "llvm/Transforms/Scalar/SROA.h"
26 #include "llvm/ADT/APInt.h"
27 #include "llvm/ADT/ArrayRef.h"
28 #include "llvm/ADT/DenseMap.h"
29 #include "llvm/ADT/PointerIntPair.h"
30 #include "llvm/ADT/STLExtras.h"
31 #include "llvm/ADT/SetVector.h"
32 #include "llvm/ADT/SmallBitVector.h"
33 #include "llvm/ADT/SmallPtrSet.h"
34 #include "llvm/ADT/SmallVector.h"
35 #include "llvm/ADT/Statistic.h"
36 #include "llvm/ADT/StringRef.h"
37 #include "llvm/ADT/Twine.h"
38 #include "llvm/ADT/iterator.h"
39 #include "llvm/ADT/iterator_range.h"
40 #include "llvm/Analysis/AssumptionCache.h"
41 #include "llvm/Analysis/GlobalsModRef.h"
42 #include "llvm/Analysis/Loads.h"
43 #include "llvm/Analysis/PtrUseVisitor.h"
44 #include "llvm/Config/llvm-config.h"
45 #include "llvm/IR/BasicBlock.h"
46 #include "llvm/IR/Constant.h"
47 #include "llvm/IR/ConstantFolder.h"
48 #include "llvm/IR/Constants.h"
49 #include "llvm/IR/DIBuilder.h"
50 #include "llvm/IR/DataLayout.h"
51 #include "llvm/IR/DebugInfoMetadata.h"
52 #include "llvm/IR/DerivedTypes.h"
53 #include "llvm/IR/Dominators.h"
54 #include "llvm/IR/Function.h"
55 #include "llvm/IR/GetElementPtrTypeIterator.h"
56 #include "llvm/IR/GlobalAlias.h"
57 #include "llvm/IR/IRBuilder.h"
58 #include "llvm/IR/InstVisitor.h"
59 #include "llvm/IR/InstrTypes.h"
60 #include "llvm/IR/Instruction.h"
61 #include "llvm/IR/Instructions.h"
62 #include "llvm/IR/IntrinsicInst.h"
63 #include "llvm/IR/Intrinsics.h"
64 #include "llvm/IR/LLVMContext.h"
65 #include "llvm/IR/Metadata.h"
66 #include "llvm/IR/Module.h"
67 #include "llvm/IR/Operator.h"
68 #include "llvm/IR/PassManager.h"
69 #include "llvm/IR/Type.h"
70 #include "llvm/IR/Use.h"
71 #include "llvm/IR/User.h"
72 #include "llvm/IR/Value.h"
73 #include "llvm/InitializePasses.h"
74 #include "llvm/Pass.h"
75 #include "llvm/Support/Casting.h"
76 #include "llvm/Support/CommandLine.h"
77 #include "llvm/Support/Compiler.h"
78 #include "llvm/Support/Debug.h"
79 #include "llvm/Support/ErrorHandling.h"
80 #include "llvm/Support/MathExtras.h"
81 #include "llvm/Support/raw_ostream.h"
82 #include "llvm/Transforms/Scalar.h"
83 #include "llvm/Transforms/Utils/Local.h"
84 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
85 #include <algorithm>
86 #include <cassert>
87 #include <chrono>
88 #include <cstddef>
89 #include <cstdint>
90 #include <cstring>
91 #include <iterator>
92 #include <string>
93 #include <tuple>
94 #include <utility>
95 #include <vector>
96 
97 using namespace llvm;
98 using namespace llvm::sroa;
99 
100 #define DEBUG_TYPE "sroa"
101 
102 STATISTIC(NumAllocasAnalyzed, "Number of allocas analyzed for replacement");
103 STATISTIC(NumAllocaPartitions, "Number of alloca partitions formed");
104 STATISTIC(MaxPartitionsPerAlloca, "Maximum number of partitions per alloca");
105 STATISTIC(NumAllocaPartitionUses, "Number of alloca partition uses rewritten");
106 STATISTIC(MaxUsesPerAllocaPartition, "Maximum number of uses of a partition");
107 STATISTIC(NumNewAllocas, "Number of new, smaller allocas introduced");
108 STATISTIC(NumPromoted, "Number of allocas promoted to SSA values");
109 STATISTIC(NumLoadsSpeculated, "Number of loads speculated to allow promotion");
110 STATISTIC(NumDeleted, "Number of instructions deleted");
111 STATISTIC(NumVectorized, "Number of vectorized aggregates");
112 
113 /// Hidden option to experiment with completely strict handling of inbounds
114 /// GEPs.
115 static cl::opt<bool> SROAStrictInbounds("sroa-strict-inbounds", cl::init(false),
116                                         cl::Hidden);
117 
118 namespace {
119 
120 /// A custom IRBuilder inserter which prefixes all names, but only in
121 /// Assert builds.
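///
/// For example (illustrative), after SetNamePrefix("alloca.0."), an
/// instruction that would otherwise be named "load" is inserted as
/// "alloca.0.load".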
122 class IRBuilderPrefixedInserter final : public IRBuilderDefaultInserter {
123   std::string Prefix;
124 
125   Twine getNameWithPrefix(const Twine &Name) const {
126     return Name.isTriviallyEmpty() ? Name : Prefix + Name;
127   }
128 
129 public:
130   void SetNamePrefix(const Twine &P) { Prefix = P.str(); }
131 
132   void InsertHelper(Instruction *I, const Twine &Name, BasicBlock *BB,
133                     BasicBlock::iterator InsertPt) const override {
134     IRBuilderDefaultInserter::InsertHelper(I, getNameWithPrefix(Name), BB,
135                                            InsertPt);
136   }
137 };
138 
139 /// Provide a type for IRBuilder that drops names in release builds.
140 using IRBuilderTy = IRBuilder<ConstantFolder, IRBuilderPrefixedInserter>;
141 
142 /// A used slice of an alloca.
143 ///
144 /// This structure represents a slice of an alloca used by some instruction. It
145 /// stores both the begin and end offsets of this use, a pointer to the use
146 /// itself, and a flag indicating whether we can classify the use as splittable
147 /// or not when forming partitions of the alloca.
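///
/// For example (illustrative), a store of an i32 at byte offset 4 of the
/// alloca is represented as the slice [4,8) holding that store's pointer use.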
148 class Slice {
149   /// The beginning offset of the range.
150   uint64_t BeginOffset = 0;
151 
152   /// The ending offset, not included in the range.
153   uint64_t EndOffset = 0;
154 
155   /// Storage for both the use of this slice and whether it can be
156   /// split.
157   PointerIntPair<Use *, 1, bool> UseAndIsSplittable;
158 
159 public:
160   Slice() = default;
161 
162   Slice(uint64_t BeginOffset, uint64_t EndOffset, Use *U, bool IsSplittable)
163       : BeginOffset(BeginOffset), EndOffset(EndOffset),
164         UseAndIsSplittable(U, IsSplittable) {}
165 
166   uint64_t beginOffset() const { return BeginOffset; }
167   uint64_t endOffset() const { return EndOffset; }
168 
169   bool isSplittable() const { return UseAndIsSplittable.getInt(); }
170   void makeUnsplittable() { UseAndIsSplittable.setInt(false); }
171 
172   Use *getUse() const { return UseAndIsSplittable.getPointer(); }
173 
174   bool isDead() const { return getUse() == nullptr; }
175   void kill() { UseAndIsSplittable.setPointer(nullptr); }
176 
177   /// Support for ordering ranges.
178   ///
179   /// This provides an ordering over ranges such that start offsets are
180   /// always increasing, and within equal start offsets, the end offsets are
181   /// decreasing. Thus the spanning range comes first in a cluster with the
182   /// same start position.
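  ///
  /// For example (illustrative), slices over [0,16), [0,8), and [4,12) sort as
  /// [0,16), [0,8), [4,12); an unsplittable slice additionally sorts before
  /// any splittable slice sharing its start offset.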
183   bool operator<(const Slice &RHS) const {
184     if (beginOffset() < RHS.beginOffset())
185       return true;
186     if (beginOffset() > RHS.beginOffset())
187       return false;
188     if (isSplittable() != RHS.isSplittable())
189       return !isSplittable();
190     if (endOffset() > RHS.endOffset())
191       return true;
192     return false;
193   }
194 
195   /// Support comparison with a single offset to allow binary searches.
196   friend LLVM_ATTRIBUTE_UNUSED bool operator<(const Slice &LHS,
197                                               uint64_t RHSOffset) {
198     return LHS.beginOffset() < RHSOffset;
199   }
200   friend LLVM_ATTRIBUTE_UNUSED bool operator<(uint64_t LHSOffset,
201                                               const Slice &RHS) {
202     return LHSOffset < RHS.beginOffset();
203   }
204 
205   bool operator==(const Slice &RHS) const {
206     return isSplittable() == RHS.isSplittable() &&
207            beginOffset() == RHS.beginOffset() && endOffset() == RHS.endOffset();
208   }
209   bool operator!=(const Slice &RHS) const { return !operator==(RHS); }
210 };
211 
212 } // end anonymous namespace
213 
214 /// Representation of the alloca slices.
215 ///
216 /// This class represents the slices of an alloca which are formed by its
217 /// various uses. If a pointer escapes, we can't fully build a representation
218 /// for the slices used and we reflect that in this structure. The uses are
219 /// stored, sorted by increasing beginning offset and with unsplittable slices
220 /// starting at a particular offset before splittable slices.
221 class llvm::sroa::AllocaSlices {
222 public:
223   /// Construct the slices of a particular alloca.
224   AllocaSlices(const DataLayout &DL, AllocaInst &AI);
225 
226   /// Test whether a pointer to the allocation escapes our analysis.
227   ///
228   /// If this is true, the slices are never fully built and should be
229   /// ignored.
230   bool isEscaped() const { return PointerEscapingInstr; }
231 
232   /// Support for iterating over the slices.
233   /// @{
234   using iterator = SmallVectorImpl<Slice>::iterator;
235   using range = iterator_range<iterator>;
236 
237   iterator begin() { return Slices.begin(); }
238   iterator end() { return Slices.end(); }
239 
240   using const_iterator = SmallVectorImpl<Slice>::const_iterator;
241   using const_range = iterator_range<const_iterator>;
242 
243   const_iterator begin() const { return Slices.begin(); }
244   const_iterator end() const { return Slices.end(); }
245   /// @}
246 
247   /// Erase a range of slices.
248   void erase(iterator Start, iterator Stop) { Slices.erase(Start, Stop); }
249 
250   /// Insert new slices for this alloca.
251   ///
252   /// This moves the slices into the alloca's slices collection, and re-sorts
253   /// everything so that the usual ordering properties of the alloca's slices
254   /// hold.
255   void insert(ArrayRef<Slice> NewSlices) {
256     int OldSize = Slices.size();
257     Slices.append(NewSlices.begin(), NewSlices.end());
258     auto SliceI = Slices.begin() + OldSize;
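    // Sort only the newly appended slices, then merge the two sorted runs so
    // the whole vector regains the Slice ordering without a full re-sort.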
259     llvm::sort(SliceI, Slices.end());
260     std::inplace_merge(Slices.begin(), SliceI, Slices.end());
261   }
262 
263   // Forward declare the iterator and range accessor for walking the
264   // partitions.
265   class partition_iterator;
266   iterator_range<partition_iterator> partitions();
267 
268   /// Access the dead users for this alloca.
269   ArrayRef<Instruction *> getDeadUsers() const { return DeadUsers; }
270 
271   /// Access Uses that should be dropped if the alloca is promotable.
272   ArrayRef<Use *> getDeadUsesIfPromotable() const {
273     return DeadUseIfPromotable;
274   }
275 
276   /// Access the dead operands referring to this alloca.
277   ///
  /// These are operands which cannot actually be used to refer to the
279   /// alloca as they are outside its range and the user doesn't correct for
280   /// that. These mostly consist of PHI node inputs and the like which we just
281   /// need to replace with undef.
282   ArrayRef<Use *> getDeadOperands() const { return DeadOperands; }
283 
284 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
285   void print(raw_ostream &OS, const_iterator I, StringRef Indent = "  ") const;
286   void printSlice(raw_ostream &OS, const_iterator I,
287                   StringRef Indent = "  ") const;
288   void printUse(raw_ostream &OS, const_iterator I,
289                 StringRef Indent = "  ") const;
290   void print(raw_ostream &OS) const;
291   void dump(const_iterator I) const;
292   void dump() const;
293 #endif
294 
295 private:
296   template <typename DerivedT, typename RetT = void> class BuilderBase;
297   class SliceBuilder;
298 
299   friend class AllocaSlices::SliceBuilder;
300 
301 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
302   /// Handle to alloca instruction to simplify method interfaces.
303   AllocaInst &AI;
304 #endif
305 
306   /// The instruction responsible for this alloca not having a known set
307   /// of slices.
308   ///
309   /// When an instruction (potentially) escapes the pointer to the alloca, we
310   /// store a pointer to that here and abort trying to form slices of the
311   /// alloca. This will be null if the alloca slices are analyzed successfully.
312   Instruction *PointerEscapingInstr;
313 
314   /// The slices of the alloca.
315   ///
316   /// We store a vector of the slices formed by uses of the alloca here. This
317   /// vector is sorted by increasing begin offset, and then the unsplittable
318   /// slices before the splittable ones. See the Slice inner class for more
319   /// details.
320   SmallVector<Slice, 8> Slices;
321 
322   /// Instructions which will become dead if we rewrite the alloca.
323   ///
324   /// Note that these are not separated by slice. This is because we expect an
325   /// alloca to be completely rewritten or not rewritten at all. If rewritten,
326   /// all these instructions can simply be removed and replaced with poison as
327   /// they come from outside of the allocated space.
328   SmallVector<Instruction *, 8> DeadUsers;
329 
  /// Uses which will become dead if we can promote the alloca.
331   SmallVector<Use *, 8> DeadUseIfPromotable;
332 
333   /// Operands which will become dead if we rewrite the alloca.
334   ///
335   /// These are operands that in their particular use can be replaced with
336   /// poison when we rewrite the alloca. These show up in out-of-bounds inputs
337   /// to PHI nodes and the like. They aren't entirely dead (there might be
  /// a GEP back into the bounds using it elsewhere), nor is the PHI, but we
339   /// want to swap this particular input for poison to simplify the use lists of
340   /// the alloca.
341   SmallVector<Use *, 8> DeadOperands;
342 };
343 
344 /// A partition of the slices.
345 ///
346 /// An ephemeral representation for a range of slices which can be viewed as
347 /// a partition of the alloca. This range represents a span of the alloca's
348 /// memory which cannot be split, and provides access to all of the slices
349 /// overlapping some part of the partition.
350 ///
351 /// Objects of this type are produced by traversing the alloca's slices, but
352 /// are only ephemeral and not persistent.
353 class llvm::sroa::Partition {
354 private:
355   friend class AllocaSlices;
356   friend class AllocaSlices::partition_iterator;
357 
358   using iterator = AllocaSlices::iterator;
359 
360   /// The beginning and ending offsets of the alloca for this
361   /// partition.
362   uint64_t BeginOffset = 0, EndOffset = 0;
363 
364   /// The start and end iterators of this partition.
365   iterator SI, SJ;
366 
367   /// A collection of split slice tails overlapping the partition.
368   SmallVector<Slice *, 4> SplitTails;
369 
370   /// Raw constructor builds an empty partition starting and ending at
371   /// the given iterator.
372   Partition(iterator SI) : SI(SI), SJ(SI) {}
373 
374 public:
375   /// The start offset of this partition.
376   ///
377   /// All of the contained slices start at or after this offset.
378   uint64_t beginOffset() const { return BeginOffset; }
379 
380   /// The end offset of this partition.
381   ///
382   /// All of the contained slices end at or before this offset.
383   uint64_t endOffset() const { return EndOffset; }
384 
385   /// The size of the partition.
386   ///
387   /// Note that this can never be zero.
388   uint64_t size() const {
389     assert(BeginOffset < EndOffset && "Partitions must span some bytes!");
390     return EndOffset - BeginOffset;
391   }
392 
393   /// Test whether this partition contains no slices, and merely spans
394   /// a region occupied by split slices.
395   bool empty() const { return SI == SJ; }
396 
397   /// \name Iterate slices that start within the partition.
398   /// These may be splittable or unsplittable. They have a begin offset >= the
399   /// partition begin offset.
400   /// @{
401   // FIXME: We should probably define a "concat_iterator" helper and use that
402   // to stitch together pointee_iterators over the split tails and the
403   // contiguous iterators of the partition. That would give a much nicer
404   // interface here. We could then additionally expose filtered iterators for
  // split, unsplit, and unsplittable slices based on the usage patterns.
406   iterator begin() const { return SI; }
407   iterator end() const { return SJ; }
408   /// @}
409 
410   /// Get the sequence of split slice tails.
411   ///
412   /// These tails are of slices which start before this partition but are
413   /// split and overlap into the partition. We accumulate these while forming
414   /// partitions.
415   ArrayRef<Slice *> splitSliceTails() const { return SplitTails; }
416 };
417 
418 /// An iterator over partitions of the alloca's slices.
419 ///
420 /// This iterator implements the core algorithm for partitioning the alloca's
421 /// slices. It is a forward iterator as we don't support backtracking for
422 /// efficiency reasons, and re-use a single storage area to maintain the
423 /// current set of split slices.
424 ///
425 /// It is templated on the slice iterator type to use so that it can operate
426 /// with either const or non-const slice iterators.
427 class AllocaSlices::partition_iterator
428     : public iterator_facade_base<partition_iterator, std::forward_iterator_tag,
429                                   Partition> {
430   friend class AllocaSlices;
431 
432   /// Most of the state for walking the partitions is held in a class
433   /// with a nice interface for examining them.
434   Partition P;
435 
436   /// We need to keep the end of the slices to know when to stop.
437   AllocaSlices::iterator SE;
438 
439   /// We also need to keep track of the maximum split end offset seen.
440   /// FIXME: Do we really?
441   uint64_t MaxSplitSliceEndOffset = 0;
442 
  /// Sets the partition to be empty at the given iterator, and sets the
444   /// end iterator.
445   partition_iterator(AllocaSlices::iterator SI, AllocaSlices::iterator SE)
446       : P(SI), SE(SE) {
447     // If not already at the end, advance our state to form the initial
448     // partition.
449     if (SI != SE)
450       advance();
451   }
452 
453   /// Advance the iterator to the next partition.
454   ///
455   /// Requires that the iterator not be at the end of the slices.
456   void advance() {
457     assert((P.SI != SE || !P.SplitTails.empty()) &&
458            "Cannot advance past the end of the slices!");
459 
460     // Clear out any split uses which have ended.
461     if (!P.SplitTails.empty()) {
462       if (P.EndOffset >= MaxSplitSliceEndOffset) {
463         // If we've finished all splits, this is easy.
464         P.SplitTails.clear();
465         MaxSplitSliceEndOffset = 0;
466       } else {
467         // Remove the uses which have ended in the prior partition. This
468         // cannot change the max split slice end because we just checked that
469         // the prior partition ended prior to that max.
470         llvm::erase_if(P.SplitTails,
471                        [&](Slice *S) { return S->endOffset() <= P.EndOffset; });
472         assert(llvm::any_of(P.SplitTails,
473                             [&](Slice *S) {
474                               return S->endOffset() == MaxSplitSliceEndOffset;
475                             }) &&
476                "Could not find the current max split slice offset!");
477         assert(llvm::all_of(P.SplitTails,
478                             [&](Slice *S) {
479                               return S->endOffset() <= MaxSplitSliceEndOffset;
480                             }) &&
481                "Max split slice end offset is not actually the max!");
482       }
483     }
484 
485     // If P.SI is already at the end, then we've cleared the split tail and
486     // now have an end iterator.
487     if (P.SI == SE) {
488       assert(P.SplitTails.empty() && "Failed to clear the split slices!");
489       return;
490     }
491 
492     // If we had a non-empty partition previously, set up the state for
493     // subsequent partitions.
494     if (P.SI != P.SJ) {
495       // Accumulate all the splittable slices which started in the old
496       // partition into the split list.
497       for (Slice &S : P)
498         if (S.isSplittable() && S.endOffset() > P.EndOffset) {
499           P.SplitTails.push_back(&S);
500           MaxSplitSliceEndOffset =
501               std::max(S.endOffset(), MaxSplitSliceEndOffset);
502         }
503 
504       // Start from the end of the previous partition.
505       P.SI = P.SJ;
506 
507       // If P.SI is now at the end, we at most have a tail of split slices.
508       if (P.SI == SE) {
509         P.BeginOffset = P.EndOffset;
510         P.EndOffset = MaxSplitSliceEndOffset;
511         return;
512       }
513 
      // If we have split slices and the next slice is after a gap and is not
      // splittable, immediately form an empty partition for the split slices
      // up until the next slice begins.
517       if (!P.SplitTails.empty() && P.SI->beginOffset() != P.EndOffset &&
518           !P.SI->isSplittable()) {
519         P.BeginOffset = P.EndOffset;
520         P.EndOffset = P.SI->beginOffset();
521         return;
522       }
523     }
524 
525     // OK, we need to consume new slices. Set the end offset based on the
526     // current slice, and step SJ past it. The beginning offset of the
527     // partition is the beginning offset of the next slice unless we have
528     // pre-existing split slices that are continuing, in which case we begin
529     // at the prior end offset.
530     P.BeginOffset = P.SplitTails.empty() ? P.SI->beginOffset() : P.EndOffset;
531     P.EndOffset = P.SI->endOffset();
532     ++P.SJ;
533 
534     // There are two strategies to form a partition based on whether the
535     // partition starts with an unsplittable slice or a splittable slice.
536     if (!P.SI->isSplittable()) {
537       // When we're forming an unsplittable region, it must always start at
538       // the first slice and will extend through its end.
539       assert(P.BeginOffset == P.SI->beginOffset());
540 
541       // Form a partition including all of the overlapping slices with this
542       // unsplittable slice.
543       while (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset) {
544         if (!P.SJ->isSplittable())
545           P.EndOffset = std::max(P.EndOffset, P.SJ->endOffset());
546         ++P.SJ;
547       }
548 
549       // We have a partition across a set of overlapping unsplittable
      // slices.
551       return;
552     }
553 
554     // If we're starting with a splittable slice, then we need to form
555     // a synthetic partition spanning it and any other overlapping splittable
    // slices.
557     assert(P.SI->isSplittable() && "Forming a splittable partition!");
558 
559     // Collect all of the overlapping splittable slices.
560     while (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset &&
561            P.SJ->isSplittable()) {
562       P.EndOffset = std::max(P.EndOffset, P.SJ->endOffset());
563       ++P.SJ;
564     }
565 
    // Back up P.EndOffset if we ended the span early when encountering an
567     // unsplittable slice. This synthesizes the early end offset of
568     // a partition spanning only splittable slices.
569     if (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset) {
570       assert(!P.SJ->isSplittable());
571       P.EndOffset = P.SJ->beginOffset();
572     }
573   }
574 
575 public:
576   bool operator==(const partition_iterator &RHS) const {
577     assert(SE == RHS.SE &&
578            "End iterators don't match between compared partition iterators!");
579 
    // The observed positions of partitions are marked by the P.SI iterator and
581     // the emptiness of the split slices. The latter is only relevant when
582     // P.SI == SE, as the end iterator will additionally have an empty split
583     // slices list, but the prior may have the same P.SI and a tail of split
584     // slices.
585     if (P.SI == RHS.P.SI && P.SplitTails.empty() == RHS.P.SplitTails.empty()) {
586       assert(P.SJ == RHS.P.SJ &&
587              "Same set of slices formed two different sized partitions!");
588       assert(P.SplitTails.size() == RHS.P.SplitTails.size() &&
589              "Same slice position with differently sized non-empty split "
590              "slice tails!");
591       return true;
592     }
593     return false;
594   }
595 
596   partition_iterator &operator++() {
597     advance();
598     return *this;
599   }
600 
601   Partition &operator*() { return P; }
602 };
603 
604 /// A forward range over the partitions of the alloca's slices.
605 ///
606 /// This accesses an iterator range over the partitions of the alloca's
607 /// slices. It computes these partitions on the fly based on the overlapping
608 /// offsets of the slices and the ability to split them. It will visit "empty"
609 /// partitions to cover regions of the alloca only accessed via split
610 /// slices.
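///
/// For example (illustrative): given the sorted slices [0,8) (unsplittable),
/// [0,24) (splittable), and [16,24) (unsplittable), the visited partitions are
/// [0,8), then an "empty" partition [8,16) covered only by the split tail of
/// [0,24), and finally [16,24).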
611 iterator_range<AllocaSlices::partition_iterator> AllocaSlices::partitions() {
612   return make_range(partition_iterator(begin(), end()),
613                     partition_iterator(end(), end()));
614 }
615 
616 static Value *foldSelectInst(SelectInst &SI) {
617   // If the condition being selected on is a constant or the same value is
618   // being selected between, fold the select. Yes this does (rarely) happen
619   // early on.
620   if (ConstantInt *CI = dyn_cast<ConstantInt>(SI.getCondition()))
621     return SI.getOperand(1 + CI->isZero());
622   if (SI.getOperand(1) == SI.getOperand(2))
623     return SI.getOperand(1);
624 
625   return nullptr;
626 }
627 
628 /// A helper that folds a PHI node or a select.
629 static Value *foldPHINodeOrSelectInst(Instruction &I) {
630   if (PHINode *PN = dyn_cast<PHINode>(&I)) {
631     // If PN merges together the same value, return that value.
632     return PN->hasConstantValue();
633   }
634   return foldSelectInst(cast<SelectInst>(I));
635 }
636 
637 /// Builder for the alloca slices.
638 ///
639 /// This class builds a set of alloca slices by recursively visiting the uses
640 /// of an alloca and making a slice for each load and store at each offset.
641 class AllocaSlices::SliceBuilder : public PtrUseVisitor<SliceBuilder> {
642   friend class PtrUseVisitor<SliceBuilder>;
643   friend class InstVisitor<SliceBuilder>;
644 
645   using Base = PtrUseVisitor<SliceBuilder>;
646 
647   const uint64_t AllocSize;
648   AllocaSlices &AS;
649 
650   SmallDenseMap<Instruction *, unsigned> MemTransferSliceMap;
651   SmallDenseMap<Instruction *, uint64_t> PHIOrSelectSizes;
652 
653   /// Set to de-duplicate dead instructions found in the use walk.
654   SmallPtrSet<Instruction *, 4> VisitedDeadInsts;
655 
656 public:
657   SliceBuilder(const DataLayout &DL, AllocaInst &AI, AllocaSlices &AS)
658       : PtrUseVisitor<SliceBuilder>(DL),
659         AllocSize(DL.getTypeAllocSize(AI.getAllocatedType()).getFixedSize()),
660         AS(AS) {}
661 
662 private:
663   void markAsDead(Instruction &I) {
664     if (VisitedDeadInsts.insert(&I).second)
665       AS.DeadUsers.push_back(&I);
666   }
667 
668   void insertUse(Instruction &I, const APInt &Offset, uint64_t Size,
669                  bool IsSplittable = false) {
670     // Completely skip uses which have a zero size or start either before or
671     // past the end of the allocation.
672     if (Size == 0 || Offset.uge(AllocSize)) {
673       LLVM_DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @"
674                         << Offset
675                         << " which has zero size or starts outside of the "
676                         << AllocSize << " byte alloca:\n"
677                         << "    alloca: " << AS.AI << "\n"
678                         << "       use: " << I << "\n");
679       return markAsDead(I);
680     }
681 
682     uint64_t BeginOffset = Offset.getZExtValue();
683     uint64_t EndOffset = BeginOffset + Size;
684 
685     // Clamp the end offset to the end of the allocation. Note that this is
686     // formulated to handle even the case where "BeginOffset + Size" overflows.
687     // This may appear superficially to be something we could ignore entirely,
688     // but that is not so! There may be widened loads or PHI-node uses where
689     // some instructions are dead but not others. We can't completely ignore
690     // them, and so have to record at least the information here.
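    // For example (illustrative), an 8 byte use at offset 12 of a 16 byte
    // alloca is recorded as the clamped slice [12,16).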
691     assert(AllocSize >= BeginOffset); // Established above.
692     if (Size > AllocSize - BeginOffset) {
693       LLVM_DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @"
694                         << Offset << " to remain within the " << AllocSize
695                         << " byte alloca:\n"
696                         << "    alloca: " << AS.AI << "\n"
697                         << "       use: " << I << "\n");
698       EndOffset = AllocSize;
699     }
700 
701     AS.Slices.push_back(Slice(BeginOffset, EndOffset, U, IsSplittable));
702   }
703 
704   void visitBitCastInst(BitCastInst &BC) {
705     if (BC.use_empty())
706       return markAsDead(BC);
707 
708     return Base::visitBitCastInst(BC);
709   }
710 
711   void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
712     if (ASC.use_empty())
713       return markAsDead(ASC);
714 
715     return Base::visitAddrSpaceCastInst(ASC);
716   }
717 
718   void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
719     if (GEPI.use_empty())
720       return markAsDead(GEPI);
721 
722     if (SROAStrictInbounds && GEPI.isInBounds()) {
723       // FIXME: This is a manually un-factored variant of the basic code inside
724       // of GEPs with checking of the inbounds invariant specified in the
725       // langref in a very strict sense. If we ever want to enable
726       // SROAStrictInbounds, this code should be factored cleanly into
727       // PtrUseVisitor, but it is easier to experiment with SROAStrictInbounds
728       // by writing out the code here where we have the underlying allocation
729       // size readily available.
730       APInt GEPOffset = Offset;
731       const DataLayout &DL = GEPI.getModule()->getDataLayout();
732       for (gep_type_iterator GTI = gep_type_begin(GEPI),
733                              GTE = gep_type_end(GEPI);
734            GTI != GTE; ++GTI) {
735         ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
736         if (!OpC)
737           break;
738 
739         // Handle a struct index, which adds its field offset to the pointer.
740         if (StructType *STy = GTI.getStructTypeOrNull()) {
741           unsigned ElementIdx = OpC->getZExtValue();
742           const StructLayout *SL = DL.getStructLayout(STy);
743           GEPOffset +=
744               APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx));
745         } else {
746           // For array or vector indices, scale the index by the size of the
747           // type.
748           APInt Index = OpC->getValue().sextOrTrunc(Offset.getBitWidth());
749           GEPOffset +=
750               Index *
751               APInt(Offset.getBitWidth(),
752                     DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize());
753         }
754 
755         // If this index has computed an intermediate pointer which is not
756         // inbounds, then the result of the GEP is a poison value and we can
757         // delete it and all uses.
758         if (GEPOffset.ugt(AllocSize))
759           return markAsDead(GEPI);
760       }
761     }
762 
763     return Base::visitGetElementPtrInst(GEPI);
764   }
765 
766   void handleLoadOrStore(Type *Ty, Instruction &I, const APInt &Offset,
767                          uint64_t Size, bool IsVolatile) {
768     // We allow splitting of non-volatile loads and stores where the type is an
769     // integer type. These may be used to implement 'memcpy' or other "transfer
770     // of bits" patterns.
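    // For example (illustrative), a non-volatile i64 load used only to shuttle
    // 8 bytes of data may later be split across smaller partitions; a volatile
    // load, or one whose type is not a whole number of bytes wide, may not.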
771     bool IsSplittable =
772         Ty->isIntegerTy() && !IsVolatile && DL.typeSizeEqualsStoreSize(Ty);
773 
774     insertUse(I, Offset, Size, IsSplittable);
775   }
776 
777   void visitLoadInst(LoadInst &LI) {
778     assert((!LI.isSimple() || LI.getType()->isSingleValueType()) &&
779            "All simple FCA loads should have been pre-split");
780 
781     if (!IsOffsetKnown)
782       return PI.setAborted(&LI);
783 
784     if (LI.isVolatile() &&
785         LI.getPointerAddressSpace() != DL.getAllocaAddrSpace())
786       return PI.setAborted(&LI);
787 
788     if (isa<ScalableVectorType>(LI.getType()))
789       return PI.setAborted(&LI);
790 
791     uint64_t Size = DL.getTypeStoreSize(LI.getType()).getFixedSize();
792     return handleLoadOrStore(LI.getType(), LI, Offset, Size, LI.isVolatile());
793   }
794 
795   void visitStoreInst(StoreInst &SI) {
796     Value *ValOp = SI.getValueOperand();
797     if (ValOp == *U)
798       return PI.setEscapedAndAborted(&SI);
799     if (!IsOffsetKnown)
800       return PI.setAborted(&SI);
801 
802     if (SI.isVolatile() &&
803         SI.getPointerAddressSpace() != DL.getAllocaAddrSpace())
804       return PI.setAborted(&SI);
805 
806     if (isa<ScalableVectorType>(ValOp->getType()))
807       return PI.setAborted(&SI);
808 
809     uint64_t Size = DL.getTypeStoreSize(ValOp->getType()).getFixedSize();
810 
811     // If this memory access can be shown to *statically* extend outside the
    // bounds of the allocation, its behavior is undefined, so simply
813     // ignore it. Note that this is more strict than the generic clamping
814     // behavior of insertUse. We also try to handle cases which might run the
815     // risk of overflow.
816     // FIXME: We should instead consider the pointer to have escaped if this
817     // function is being instrumented for addressing bugs or race conditions.
818     if (Size > AllocSize || Offset.ugt(AllocSize - Size)) {
819       LLVM_DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte store @"
820                         << Offset << " which extends past the end of the "
821                         << AllocSize << " byte alloca:\n"
822                         << "    alloca: " << AS.AI << "\n"
823                         << "       use: " << SI << "\n");
824       return markAsDead(SI);
825     }
826 
827     assert((!SI.isSimple() || ValOp->getType()->isSingleValueType()) &&
828            "All simple FCA stores should have been pre-split");
829     handleLoadOrStore(ValOp->getType(), SI, Offset, Size, SI.isVolatile());
830   }
831 
832   void visitMemSetInst(MemSetInst &II) {
833     assert(II.getRawDest() == *U && "Pointer use is not the destination?");
834     ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
835     if ((Length && Length->getValue() == 0) ||
836         (IsOffsetKnown && Offset.uge(AllocSize)))
837       // Zero-length mem transfer intrinsics can be ignored entirely.
838       return markAsDead(II);
839 
840     if (!IsOffsetKnown)
841       return PI.setAborted(&II);
842 
843     // Don't replace this with a store with a different address space.  TODO:
844     // Use a store with the casted new alloca?
845     if (II.isVolatile() && II.getDestAddressSpace() != DL.getAllocaAddrSpace())
846       return PI.setAborted(&II);
847 
848     insertUse(II, Offset, Length ? Length->getLimitedValue()
849                                  : AllocSize - Offset.getLimitedValue(),
850               (bool)Length);
851   }
852 
853   void visitMemTransferInst(MemTransferInst &II) {
854     ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
855     if (Length && Length->getValue() == 0)
856       // Zero-length mem transfer intrinsics can be ignored entirely.
857       return markAsDead(II);
858 
859     // Because we can visit these intrinsics twice, also check to see if the
860     // first time marked this instruction as dead. If so, skip it.
861     if (VisitedDeadInsts.count(&II))
862       return;
863 
864     if (!IsOffsetKnown)
865       return PI.setAborted(&II);
866 
867     // Don't replace this with a load/store with a different address space.
868     // TODO: Use a store with the casted new alloca?
869     if (II.isVolatile() &&
870         (II.getDestAddressSpace() != DL.getAllocaAddrSpace() ||
871          II.getSourceAddressSpace() != DL.getAllocaAddrSpace()))
872       return PI.setAborted(&II);
873 
874     // This side of the transfer is completely out-of-bounds, and so we can
875     // nuke the entire transfer. However, we also need to nuke the other side
876     // if already added to our partitions.
877     // FIXME: Yet another place we really should bypass this when
878     // instrumenting for ASan.
879     if (Offset.uge(AllocSize)) {
880       SmallDenseMap<Instruction *, unsigned>::iterator MTPI =
881           MemTransferSliceMap.find(&II);
882       if (MTPI != MemTransferSliceMap.end())
883         AS.Slices[MTPI->second].kill();
884       return markAsDead(II);
885     }
886 
887     uint64_t RawOffset = Offset.getLimitedValue();
888     uint64_t Size = Length ? Length->getLimitedValue() : AllocSize - RawOffset;
889 
890     // Check for the special case where the same exact value is used for both
891     // source and dest.
892     if (*U == II.getRawDest() && *U == II.getRawSource()) {
893       // For non-volatile transfers this is a no-op.
894       if (!II.isVolatile())
895         return markAsDead(II);
896 
897       return insertUse(II, Offset, Size, /*IsSplittable=*/false);
898     }
899 
900     // If we have seen both source and destination for a mem transfer, then
901     // they both point to the same alloca.
902     bool Inserted;
903     SmallDenseMap<Instruction *, unsigned>::iterator MTPI;
904     std::tie(MTPI, Inserted) =
905         MemTransferSliceMap.insert(std::make_pair(&II, AS.Slices.size()));
906     unsigned PrevIdx = MTPI->second;
907     if (!Inserted) {
908       Slice &PrevP = AS.Slices[PrevIdx];
909 
910       // Check if the begin offsets match and this is a non-volatile transfer.
911       // In that case, we can completely elide the transfer.
912       if (!II.isVolatile() && PrevP.beginOffset() == RawOffset) {
913         PrevP.kill();
914         return markAsDead(II);
915       }
916 
917       // Otherwise we have an offset transfer within the same alloca. We can't
918       // split those.
919       PrevP.makeUnsplittable();
920     }
921 
922     // Insert the use now that we've fixed up the splittable nature.
923     insertUse(II, Offset, Size, /*IsSplittable=*/Inserted && Length);
924 
925     // Check that we ended up with a valid index in the map.
926     assert(AS.Slices[PrevIdx].getUse()->getUser() == &II &&
927            "Map index doesn't point back to a slice with this user.");
928   }
929 
930   // Disable SRoA for any intrinsics except for lifetime invariants and
931   // invariant group.
932   // FIXME: What about debug intrinsics? This matches old behavior, but
933   // doesn't make sense.
934   void visitIntrinsicInst(IntrinsicInst &II) {
935     if (II.isDroppable()) {
936       AS.DeadUseIfPromotable.push_back(U);
937       return;
938     }
939 
940     if (!IsOffsetKnown)
941       return PI.setAborted(&II);
942 
943     if (II.isLifetimeStartOrEnd()) {
944       ConstantInt *Length = cast<ConstantInt>(II.getArgOperand(0));
945       uint64_t Size = std::min(AllocSize - Offset.getLimitedValue(),
946                                Length->getLimitedValue());
947       insertUse(II, Offset, Size, true);
948       return;
949     }
950 
951     if (II.isLaunderOrStripInvariantGroup()) {
952       enqueueUsers(II);
953       return;
954     }
955 
956     Base::visitIntrinsicInst(II);
957   }
958 
959   Instruction *hasUnsafePHIOrSelectUse(Instruction *Root, uint64_t &Size) {
960     // We consider any PHI or select that results in a direct load or store of
961     // the same offset to be a viable use for slicing purposes. These uses
962     // are considered unsplittable and the size is the maximum loaded or stored
963     // size.
964     SmallPtrSet<Instruction *, 4> Visited;
965     SmallVector<std::pair<Instruction *, Instruction *>, 4> Uses;
966     Visited.insert(Root);
967     Uses.push_back(std::make_pair(cast<Instruction>(*U), Root));
968     const DataLayout &DL = Root->getModule()->getDataLayout();
969     // If there are no loads or stores, the access is dead. We mark that as
970     // a size zero access.
971     Size = 0;
972     do {
973       Instruction *I, *UsedI;
974       std::tie(UsedI, I) = Uses.pop_back_val();
975 
976       if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
977         Size = std::max(Size,
978                         DL.getTypeStoreSize(LI->getType()).getFixedSize());
979         continue;
980       }
981       if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
982         Value *Op = SI->getOperand(0);
983         if (Op == UsedI)
984           return SI;
985         Size = std::max(Size,
986                         DL.getTypeStoreSize(Op->getType()).getFixedSize());
987         continue;
988       }
989 
990       if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
991         if (!GEP->hasAllZeroIndices())
992           return GEP;
993       } else if (!isa<BitCastInst>(I) && !isa<PHINode>(I) &&
994                  !isa<SelectInst>(I) && !isa<AddrSpaceCastInst>(I)) {
995         return I;
996       }
997 
998       for (User *U : I->users())
999         if (Visited.insert(cast<Instruction>(U)).second)
1000           Uses.push_back(std::make_pair(I, cast<Instruction>(U)));
1001     } while (!Uses.empty());
1002 
1003     return nullptr;
1004   }
1005 
1006   void visitPHINodeOrSelectInst(Instruction &I) {
1007     assert(isa<PHINode>(I) || isa<SelectInst>(I));
1008     if (I.use_empty())
1009       return markAsDead(I);
1010 
    // If this is a PHI node before a catchswitch, we cannot insert any non-PHI
    // instructions in this BB, even though rewriting may require them. Bail
    // out in these cases.
1014     if (isa<PHINode>(I) &&
1015         I.getParent()->getFirstInsertionPt() == I.getParent()->end())
1016       return PI.setAborted(&I);
1017 
1018     // TODO: We could use SimplifyInstruction here to fold PHINodes and
    // SelectInsts. However, doing so requires changing the current
1020     // dead-operand-tracking mechanism. For instance, suppose neither loading
1021     // from %U nor %other traps. Then "load (select undef, %U, %other)" does not
1022     // trap either.  However, if we simply replace %U with undef using the
1023     // current dead-operand-tracking mechanism, "load (select undef, undef,
1024     // %other)" may trap because the select may return the first operand
1025     // "undef".
1026     if (Value *Result = foldPHINodeOrSelectInst(I)) {
1027       if (Result == *U)
1028         // If the result of the constant fold will be the pointer, recurse
1029         // through the PHI/select as if we had RAUW'ed it.
1030         enqueueUsers(I);
1031       else
1032         // Otherwise the operand to the PHI/select is dead, and we can replace
1033         // it with poison.
1034         AS.DeadOperands.push_back(U);
1035 
1036       return;
1037     }
1038 
1039     if (!IsOffsetKnown)
1040       return PI.setAborted(&I);
1041 
1042     // See if we already have computed info on this node.
1043     uint64_t &Size = PHIOrSelectSizes[&I];
1044     if (!Size) {
1045       // This is a new PHI/Select, check for an unsafe use of it.
1046       if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&I, Size))
1047         return PI.setAborted(UnsafeI);
1048     }
1049 
1050     // For PHI and select operands outside the alloca, we can't nuke the entire
1051     // phi or select -- the other side might still be relevant, so we special
1052     // case them here and use a separate structure to track the operands
1053     // themselves which should be replaced with poison.
1054     // FIXME: This should instead be escaped in the event we're instrumenting
1055     // for address sanitization.
1056     if (Offset.uge(AllocSize)) {
1057       AS.DeadOperands.push_back(U);
1058       return;
1059     }
1060 
1061     insertUse(I, Offset, Size);
1062   }
1063 
1064   void visitPHINode(PHINode &PN) { visitPHINodeOrSelectInst(PN); }
1065 
1066   void visitSelectInst(SelectInst &SI) { visitPHINodeOrSelectInst(SI); }
1067 
1068   /// Disable SROA entirely if there are unhandled users of the alloca.
1069   void visitInstruction(Instruction &I) { PI.setAborted(&I); }
1070 };
1071 
1072 AllocaSlices::AllocaSlices(const DataLayout &DL, AllocaInst &AI)
1073     :
1074 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1075       AI(AI),
1076 #endif
1077       PointerEscapingInstr(nullptr) {
1078   SliceBuilder PB(DL, AI, *this);
1079   SliceBuilder::PtrInfo PtrI = PB.visitPtr(AI);
1080   if (PtrI.isEscaped() || PtrI.isAborted()) {
1081     // FIXME: We should sink the escape vs. abort info into the caller nicely,
1082     // possibly by just storing the PtrInfo in the AllocaSlices.
1083     PointerEscapingInstr = PtrI.getEscapingInst() ? PtrI.getEscapingInst()
1084                                                   : PtrI.getAbortingInst();
1085     assert(PointerEscapingInstr && "Did not track a bad instruction");
1086     return;
1087   }
1088 
1089   llvm::erase_if(Slices, [](const Slice &S) { return S.isDead(); });
1090 
1091   // Sort the uses. This arranges for the offsets to be in ascending order,
1092   // and the sizes to be in descending order.
1093   llvm::stable_sort(Slices);
1094 }
1095 
1096 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1097 
1098 void AllocaSlices::print(raw_ostream &OS, const_iterator I,
1099                          StringRef Indent) const {
1100   printSlice(OS, I, Indent);
1101   OS << "\n";
1102   printUse(OS, I, Indent);
1103 }
1104 
1105 void AllocaSlices::printSlice(raw_ostream &OS, const_iterator I,
1106                               StringRef Indent) const {
1107   OS << Indent << "[" << I->beginOffset() << "," << I->endOffset() << ")"
1108      << " slice #" << (I - begin())
1109      << (I->isSplittable() ? " (splittable)" : "");
1110 }
1111 
1112 void AllocaSlices::printUse(raw_ostream &OS, const_iterator I,
1113                             StringRef Indent) const {
1114   OS << Indent << "  used by: " << *I->getUse()->getUser() << "\n";
1115 }
1116 
1117 void AllocaSlices::print(raw_ostream &OS) const {
1118   if (PointerEscapingInstr) {
1119     OS << "Can't analyze slices for alloca: " << AI << "\n"
1120        << "  A pointer to this alloca escaped by:\n"
1121        << "  " << *PointerEscapingInstr << "\n";
1122     return;
1123   }
1124 
1125   OS << "Slices of alloca: " << AI << "\n";
1126   for (const_iterator I = begin(), E = end(); I != E; ++I)
1127     print(OS, I);
1128 }
1129 
1130 LLVM_DUMP_METHOD void AllocaSlices::dump(const_iterator I) const {
1131   print(dbgs(), I);
1132 }
1133 LLVM_DUMP_METHOD void AllocaSlices::dump() const { print(dbgs()); }
1134 
1135 #endif // !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1136 
1137 /// Walk the range of a partitioning looking for a common type to cover this
1138 /// sequence of slices.
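///
/// For example (illustrative), if one slice loads the whole partition as an
/// i64 and another loads it as a double, there is no common type, but i64 is
/// still returned as the widest integer type seen, usable for an integer-based
/// rewrite.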
1139 static std::pair<Type *, IntegerType *>
1140 findCommonType(AllocaSlices::const_iterator B, AllocaSlices::const_iterator E,
1141                uint64_t EndOffset) {
1142   Type *Ty = nullptr;
1143   bool TyIsCommon = true;
1144   IntegerType *ITy = nullptr;
1145 
1146   // Note that we need to look at *every* alloca slice's Use to ensure we
1147   // always get consistent results regardless of the order of slices.
1148   for (AllocaSlices::const_iterator I = B; I != E; ++I) {
1149     Use *U = I->getUse();
1150     if (isa<IntrinsicInst>(*U->getUser()))
1151       continue;
1152     if (I->beginOffset() != B->beginOffset() || I->endOffset() != EndOffset)
1153       continue;
1154 
1155     Type *UserTy = nullptr;
1156     if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
1157       UserTy = LI->getType();
1158     } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
1159       UserTy = SI->getValueOperand()->getType();
1160     }
1161 
1162     if (IntegerType *UserITy = dyn_cast_or_null<IntegerType>(UserTy)) {
1163       // If the type is larger than the partition, skip it. We only encounter
1164       // this for split integer operations where we want to use the type of the
1165       // entity causing the split. Also skip if the type is not a byte width
1166       // multiple.
1167       if (UserITy->getBitWidth() % 8 != 0 ||
1168           UserITy->getBitWidth() / 8 > (EndOffset - B->beginOffset()))
1169         continue;
1170 
1171       // Track the largest bitwidth integer type used in this way in case there
1172       // is no common type.
1173       if (!ITy || ITy->getBitWidth() < UserITy->getBitWidth())
1174         ITy = UserITy;
1175     }
1176 
1177     // To avoid depending on the order of slices, Ty and TyIsCommon must not
1178     // depend on types skipped above.
1179     if (!UserTy || (Ty && Ty != UserTy))
1180       TyIsCommon = false; // Give up on anything but an iN type.
1181     else
1182       Ty = UserTy;
1183   }
1184 
1185   return {TyIsCommon ? Ty : nullptr, ITy};
1186 }
1187 
1188 /// PHI instructions that use an alloca and are subsequently loaded can be
1189 /// rewritten to load both input pointers in the pred blocks and then PHI the
1190 /// results, allowing the load of the alloca to be promoted.
1191 /// From this:
1192 ///   %P2 = phi [i32* %Alloca, i32* %Other]
1193 ///   %V = load i32* %P2
1194 /// to:
1195 ///   %V1 = load i32* %Alloca      -> will be mem2reg'd
1196 ///   ...
1197 ///   %V2 = load i32* %Other
1198 ///   ...
1199 ///   %V = phi [i32 %V1, i32 %V2]
1200 ///
1201 /// We can do this to a select if its only uses are loads and if the operands
1202 /// to the select can be loaded unconditionally.
1203 ///
1204 /// FIXME: This should be hoisted into a generic utility, likely in
1205 /// Transforms/Util/Local.h
1206 static bool isSafePHIToSpeculate(PHINode &PN) {
1207   const DataLayout &DL = PN.getModule()->getDataLayout();
1208 
1209   // For now, we can only do this promotion if the load is in the same block
1210   // as the PHI, and if there are no stores between the phi and load.
1211   // TODO: Allow recursive phi users.
1212   // TODO: Allow stores.
1213   BasicBlock *BB = PN.getParent();
1214   Align MaxAlign;
1215   uint64_t APWidth = DL.getIndexTypeSizeInBits(PN.getType());
1216   APInt MaxSize(APWidth, 0);
1217   bool HaveLoad = false;
1218   for (User *U : PN.users()) {
1219     LoadInst *LI = dyn_cast<LoadInst>(U);
1220     if (!LI || !LI->isSimple())
1221       return false;
1222 
1223     // For now we only allow loads in the same block as the PHI.  This is
1224     // a common case that happens when instcombine merges two loads through
1225     // a PHI.
1226     if (LI->getParent() != BB)
1227       return false;
1228 
1229     // Ensure that there are no instructions between the PHI and the load that
1230     // could store.
1231     for (BasicBlock::iterator BBI(PN); &*BBI != LI; ++BBI)
1232       if (BBI->mayWriteToMemory())
1233         return false;
1234 
1235     uint64_t Size = DL.getTypeStoreSize(LI->getType()).getFixedSize();
1236     MaxAlign = std::max(MaxAlign, LI->getAlign());
1237     MaxSize = MaxSize.ult(Size) ? APInt(APWidth, Size) : MaxSize;
1238     HaveLoad = true;
1239   }
1240 
1241   if (!HaveLoad)
1242     return false;
1243 
1244   // We can only transform this if it is safe to push the loads into the
1245   // predecessor blocks. The only thing to watch out for is that we can't put
1246   // a possibly trapping load in the predecessor if it is a critical edge.
1247   for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
1248     Instruction *TI = PN.getIncomingBlock(Idx)->getTerminator();
1249     Value *InVal = PN.getIncomingValue(Idx);
1250 
1251     // If the value is produced by the terminator of the predecessor (an
1252     // invoke) or it has side-effects, there is no valid place to put a load
1253     // in the predecessor.
1254     if (TI == InVal || TI->mayHaveSideEffects())
1255       return false;
1256 
1257     // If the predecessor has a single successor, then the edge isn't
1258     // critical.
1259     if (TI->getNumSuccessors() == 1)
1260       continue;
1261 
1262     // If this pointer is always safe to load, or if we can prove that there
1263     // is already a load in the block, then we can move the load to the pred
1264     // block.
1265     if (isSafeToLoadUnconditionally(InVal, MaxAlign, MaxSize, DL, TI))
1266       continue;
1267 
1268     return false;
1269   }
1270 
1271   return true;
1272 }
1273 
1274 static void speculatePHINodeLoads(IRBuilderTy &IRB, PHINode &PN) {
1275   LLVM_DEBUG(dbgs() << "    original: " << PN << "\n");
1276 
1277   LoadInst *SomeLoad = cast<LoadInst>(PN.user_back());
1278   Type *LoadTy = SomeLoad->getType();
1279   IRB.SetInsertPoint(&PN);
1280   PHINode *NewPN = IRB.CreatePHI(LoadTy, PN.getNumIncomingValues(),
1281                                  PN.getName() + ".sroa.speculated");
1282 
1283   // Get the AA tags and alignment to use from one of the loads. It does not
1284   // matter which one we get and if any differ.
1285   AAMDNodes AATags = SomeLoad->getAAMetadata();
1286   Align Alignment = SomeLoad->getAlign();
1287 
1288   // Rewrite all loads of the PN to use the new PHI.
1289   while (!PN.use_empty()) {
1290     LoadInst *LI = cast<LoadInst>(PN.user_back());
1291     LI->replaceAllUsesWith(NewPN);
1292     LI->eraseFromParent();
1293   }
1294 
1295   // Inject loads into all of the pred blocks.
1296   DenseMap<BasicBlock*, Value*> InjectedLoads;
1297   for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
1298     BasicBlock *Pred = PN.getIncomingBlock(Idx);
1299     Value *InVal = PN.getIncomingValue(Idx);
1300 
1301     // A PHI node is allowed to have multiple (duplicated) entries for the same
1302     // basic block, as long as the value is the same. So if we already injected
1303     // a load in the predecessor, then we should reuse the same load for all
1304     // duplicated entries.
1305     if (Value* V = InjectedLoads.lookup(Pred)) {
1306       NewPN->addIncoming(V, Pred);
1307       continue;
1308     }
1309 
1310     Instruction *TI = Pred->getTerminator();
1311     IRB.SetInsertPoint(TI);
1312 
1313     LoadInst *Load = IRB.CreateAlignedLoad(
1314         LoadTy, InVal, Alignment,
1315         (PN.getName() + ".sroa.speculate.load." + Pred->getName()));
1316     ++NumLoadsSpeculated;
1317     if (AATags)
1318       Load->setAAMetadata(AATags);
1319     NewPN->addIncoming(Load, Pred);
1320     InjectedLoads[Pred] = Load;
1321   }
1322 
1323   LLVM_DEBUG(dbgs() << "          speculated to: " << *NewPN << "\n");
1324   PN.eraseFromParent();
1325 }
1326 
1327 /// Select instructions that use an alloca and are subsequently loaded can be
/// rewritten to load both input pointers and then select between the results,
1329 /// allowing the load of the alloca to be promoted.
1330 /// From this:
1331 ///   %P2 = select i1 %cond, i32* %Alloca, i32* %Other
1332 ///   %V = load i32* %P2
1333 /// to:
1334 ///   %V1 = load i32* %Alloca      -> will be mem2reg'd
1335 ///   %V2 = load i32* %Other
1336 ///   %V = select i1 %cond, i32 %V1, i32 %V2
1337 ///
1338 /// We can do this to a select if its only uses are loads and if the operand
/// to the select can be loaded unconditionally. If we find an intervening
/// bitcast whose single use is the load, we still allow the promotion.
1341 static bool isSafeSelectToSpeculate(SelectInst &SI) {
1342   Value *TValue = SI.getTrueValue();
1343   Value *FValue = SI.getFalseValue();
1344   const DataLayout &DL = SI.getModule()->getDataLayout();
1345 
1346   for (User *U : SI.users()) {
1347     LoadInst *LI;
1348     BitCastInst *BC = dyn_cast<BitCastInst>(U);
1349     if (BC && BC->hasOneUse())
1350       LI = dyn_cast<LoadInst>(*BC->user_begin());
1351     else
1352       LI = dyn_cast<LoadInst>(U);
1353 
1354     if (!LI || !LI->isSimple())
1355       return false;
1356 
    // Both operands to the select need to be dereferenceable, either
    // absolutely (e.g. allocas) or at this point because we can see other
    // accesses to them.
1360     if (!isSafeToLoadUnconditionally(TValue, LI->getType(),
1361                                      LI->getAlign(), DL, LI))
1362       return false;
1363     if (!isSafeToLoadUnconditionally(FValue, LI->getType(),
1364                                      LI->getAlign(), DL, LI))
1365       return false;
1366   }
1367 
1368   return true;
1369 }
1370 
1371 static void speculateSelectInstLoads(IRBuilderTy &IRB, SelectInst &SI) {
1372   LLVM_DEBUG(dbgs() << "    original: " << SI << "\n");
1373 
1374   IRB.SetInsertPoint(&SI);
1375   Value *TV = SI.getTrueValue();
1376   Value *FV = SI.getFalseValue();
1377   // Replace the loads of the select with a select of two loads.
1378   while (!SI.use_empty()) {
1379     LoadInst *LI;
1380     BitCastInst *BC = dyn_cast<BitCastInst>(SI.user_back());
1381     if (BC) {
1382       assert(BC->hasOneUse() && "Bitcast should have a single use.");
1383       LI = cast<LoadInst>(BC->user_back());
1384     } else {
1385       LI = cast<LoadInst>(SI.user_back());
1386     }
1387 
1388     assert(LI->isSimple() && "We only speculate simple loads");
1389 
1390     IRB.SetInsertPoint(LI);
1391     Value *NewTV =
1392         BC ? IRB.CreateBitCast(TV, BC->getType(), TV->getName() + ".sroa.cast")
1393            : TV;
1394     Value *NewFV =
1395         BC ? IRB.CreateBitCast(FV, BC->getType(), FV->getName() + ".sroa.cast")
1396            : FV;
1397     LoadInst *TL = IRB.CreateLoad(LI->getType(), NewTV,
1398                                   LI->getName() + ".sroa.speculate.load.true");
1399     LoadInst *FL = IRB.CreateLoad(LI->getType(), NewFV,
1400                                   LI->getName() + ".sroa.speculate.load.false");
1401     NumLoadsSpeculated += 2;
1402 
1403     // Transfer alignment and AA info if present.
1404     TL->setAlignment(LI->getAlign());
1405     FL->setAlignment(LI->getAlign());
1406 
1407     AAMDNodes Tags = LI->getAAMetadata();
1408     if (Tags) {
1409       TL->setAAMetadata(Tags);
1410       FL->setAAMetadata(Tags);
1411     }
1412 
1413     Value *V = IRB.CreateSelect(SI.getCondition(), TL, FL,
1414                                 LI->getName() + ".sroa.speculated");
1415 
1416     LLVM_DEBUG(dbgs() << "          speculated to: " << *V << "\n");
1417     LI->replaceAllUsesWith(V);
1418     LI->eraseFromParent();
1419     if (BC)
1420       BC->eraseFromParent();
1421   }
1422   SI.eraseFromParent();
1423 }
1424 
1425 /// Build a GEP out of a base pointer and indices.
1426 ///
1427 /// This will return the BasePtr if that is valid, or build a new GEP
1428 /// instruction using the IRBuilder if GEP-ing is needed.
1429 static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr,
1430                        SmallVectorImpl<Value *> &Indices,
1431                        const Twine &NamePrefix) {
1432   if (Indices.empty())
1433     return BasePtr;
1434 
1435   // A single zero index is a no-op, so check for this and avoid building a GEP
1436   // in that case.
1437   if (Indices.size() == 1 && cast<ConstantInt>(Indices.back())->isZero())
1438     return BasePtr;
1439 
1440   return IRB.CreateInBoundsGEP(BasePtr->getType()->getPointerElementType(),
1441                                BasePtr, Indices, NamePrefix + "sroa_idx");
1442 }
1443 
1444 /// Get a natural GEP off of the BasePtr walking through Ty toward
1445 /// TargetTy without changing the offset of the pointer.
1446 ///
1447 /// This routine assumes we've already established a properly offset GEP with
1448 /// Indices, and arrived at the Ty type. The goal is to continue to GEP with
1449 /// zero-indices down through type layers until we find one the same as
1450 /// TargetTy. If we can't find one with the same type, we at least try to use
1451 /// one with the same size. If none of that works, we just produce the GEP as
1452 /// indicated by Indices to have the correct offset.
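///
/// For example (illustrative), with Ty = { [4 x i32] } and TargetTy = i32,
/// this appends two zero indices so the resulting GEP addresses the first
/// i32 element rather than the enclosing aggregate.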
1453 static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL,
1454                                     Value *BasePtr, Type *Ty, Type *TargetTy,
1455                                     SmallVectorImpl<Value *> &Indices,
1456                                     const Twine &NamePrefix) {
1457   if (Ty == TargetTy)
1458     return buildGEP(IRB, BasePtr, Indices, NamePrefix);
1459 
1460   // Offset size to use for the indices.
1461   unsigned OffsetSize = DL.getIndexTypeSizeInBits(BasePtr->getType());
1462 
1463   // See if we can descend into a struct and locate a field with the correct
1464   // type.
1465   unsigned NumLayers = 0;
1466   Type *ElementTy = Ty;
1467   do {
1468     if (ElementTy->isPointerTy())
1469       break;
1470 
1471     if (ArrayType *ArrayTy = dyn_cast<ArrayType>(ElementTy)) {
1472       ElementTy = ArrayTy->getElementType();
1473       Indices.push_back(IRB.getIntN(OffsetSize, 0));
1474     } else if (VectorType *VectorTy = dyn_cast<VectorType>(ElementTy)) {
1475       ElementTy = VectorTy->getElementType();
1476       Indices.push_back(IRB.getInt32(0));
1477     } else if (StructType *STy = dyn_cast<StructType>(ElementTy)) {
1478       if (STy->element_begin() == STy->element_end())
1479         break; // Nothing left to descend into.
1480       ElementTy = *STy->element_begin();
1481       Indices.push_back(IRB.getInt32(0));
1482     } else {
1483       break;
1484     }
1485     ++NumLayers;
1486   } while (ElementTy != TargetTy);
1487   if (ElementTy != TargetTy)
1488     Indices.erase(Indices.end() - NumLayers, Indices.end());
1489 
1490   return buildGEP(IRB, BasePtr, Indices, NamePrefix);
1491 }
1492 
1493 /// Get a natural GEP from a base pointer to a particular offset and
1494 /// resulting in a particular type.
1495 ///
1496 /// The goal is to produce a "natural" looking GEP that works with the existing
1497 /// composite types to arrive at the appropriate offset and element type for
/// a pointer. TargetTy is the element type the returned GEP should point to if
/// possible. The offset is peeled off layer by layer, appending the
/// appropriate index to Indices at each step, until it reaches zero.
1501 ///
1502 /// If no natural GEP can be constructed, this function returns null.
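///
/// For example (illustrative, assuming 4-byte i32 and no padding), for a
/// pointer to { i32, [4 x i32] } with Offset = 8 and TargetTy = i32, the
/// natural GEP uses the indices 0, 1, 1 to reach the second element of the
/// inner array.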
1503 static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &DL,
1504                                       Value *Ptr, APInt Offset, Type *TargetTy,
1505                                       SmallVectorImpl<Value *> &Indices,
1506                                       const Twine &NamePrefix) {
1507   PointerType *Ty = cast<PointerType>(Ptr->getType());
1508 
1509   // Don't consider any GEPs through an i8* as natural unless the TargetTy is
1510   // an i8.
1511   if (Ty == IRB.getInt8PtrTy(Ty->getAddressSpace()) && TargetTy->isIntegerTy(8))
1512     return nullptr;
1513 
1514   Type *ElementTy = Ty->getElementType();
1515   if (!ElementTy->isSized())
1516     return nullptr; // We can't GEP through an unsized element.
1517 
1518   SmallVector<APInt> IntIndices = DL.getGEPIndicesForOffset(ElementTy, Offset);
1519   if (Offset != 0)
1520     return nullptr;
1521 
1522   for (const APInt &Index : IntIndices)
1523     Indices.push_back(IRB.getInt(Index));
1524   return getNaturalGEPWithType(IRB, DL, Ptr, ElementTy, TargetTy, Indices,
1525                                NamePrefix);
1526 }
1527 
1528 /// Compute an adjusted pointer from Ptr by Offset bytes where the
1529 /// resulting pointer has PointerTy.
1530 ///
1531 /// This tries very hard to compute a "natural" GEP which arrives at the offset
1532 /// and produces the pointer type desired. Where it cannot, it will try to use
1533 /// the natural GEP to arrive at the offset and bitcast to the type. Where that
1534 /// fails, it will try to use an existing i8* and GEP to the byte offset and
1535 /// bitcast to the type.
1536 ///
1537 /// The strategy for finding the more natural GEPs is to peel off layers of the
1538 /// pointer, walking back through bit casts and GEPs, searching for a base
1539 /// pointer from which we can compute a natural GEP with the desired
1540 /// properties. The algorithm tries to fold as many constant indices into
1541 /// a single GEP as possible, thus making each GEP more independent of the
1542 /// surrounding code.
1543 static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr,
1544                              APInt Offset, Type *PointerTy,
1545                              const Twine &NamePrefix) {
1546   // Create i8 GEP for opaque pointers.
1547   if (Ptr->getType()->isOpaquePointerTy()) {
1548     if (Offset != 0)
1549       Ptr = IRB.CreateInBoundsGEP(IRB.getInt8Ty(), Ptr, IRB.getInt(Offset),
1550                                   NamePrefix + "sroa_idx");
1551     return IRB.CreatePointerBitCastOrAddrSpaceCast(Ptr, PointerTy,
1552                                                    NamePrefix + "sroa_cast");
1553   }
1554 
1555   // Even though we don't look through PHI nodes, we could be called on an
1556   // instruction in an unreachable block, which may be on a cycle.
1557   SmallPtrSet<Value *, 4> Visited;
1558   Visited.insert(Ptr);
1559   SmallVector<Value *, 4> Indices;
1560 
1561   // We may end up computing an offset pointer that has the wrong type. If we
1562   // never are able to compute one directly that has the correct type, we'll
1563   // fall back to it, so keep it and the base it was computed from around here.
1564   Value *OffsetPtr = nullptr;
1565   Value *OffsetBasePtr;
1566 
1567   // Remember any i8 pointer we come across to re-use if we need to do a raw
1568   // byte offset.
1569   Value *Int8Ptr = nullptr;
1570   APInt Int8PtrOffset(Offset.getBitWidth(), 0);
1571 
1572   PointerType *TargetPtrTy = cast<PointerType>(PointerTy);
1573   Type *TargetTy = TargetPtrTy->getElementType();
1574 
  // Because an `addrspacecast` may be involved, `Ptr` (the storage pointer)
  // may have a different address space from the expected `PointerTy` (the
  // pointer type to be used). Adjust the pointer type based on the original
  // storage pointer.
1578   auto AS = cast<PointerType>(Ptr->getType())->getAddressSpace();
1579   PointerTy = TargetTy->getPointerTo(AS);
1580 
1581   do {
1582     // First fold any existing GEPs into the offset.
1583     while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
1584       APInt GEPOffset(Offset.getBitWidth(), 0);
1585       if (!GEP->accumulateConstantOffset(DL, GEPOffset))
1586         break;
1587       Offset += GEPOffset;
1588       Ptr = GEP->getPointerOperand();
1589       if (!Visited.insert(Ptr).second)
1590         break;
1591     }
1592 
1593     // See if we can perform a natural GEP here.
1594     Indices.clear();
1595     if (Value *P = getNaturalGEPWithOffset(IRB, DL, Ptr, Offset, TargetTy,
1596                                            Indices, NamePrefix)) {
1597       // If we have a new natural pointer at the offset, clear out any old
1598       // offset pointer we computed. Unless it is the base pointer or
1599       // a non-instruction, we built a GEP we don't need. Zap it.
1600       if (OffsetPtr && OffsetPtr != OffsetBasePtr)
1601         if (Instruction *I = dyn_cast<Instruction>(OffsetPtr)) {
          assert(I->use_empty() && "Built a GEP with uses somehow!");
1603           I->eraseFromParent();
1604         }
1605       OffsetPtr = P;
1606       OffsetBasePtr = Ptr;
1607       // If we also found a pointer of the right type, we're done.
1608       if (P->getType() == PointerTy)
1609         break;
1610     }
1611 
1612     // Stash this pointer if we've found an i8*.
1613     if (Ptr->getType()->isIntegerTy(8)) {
1614       Int8Ptr = Ptr;
1615       Int8PtrOffset = Offset;
1616     }
1617 
1618     // Peel off a layer of the pointer and update the offset appropriately.
1619     if (Operator::getOpcode(Ptr) == Instruction::BitCast) {
1620       Ptr = cast<Operator>(Ptr)->getOperand(0);
1621     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
1622       if (GA->isInterposable())
1623         break;
1624       Ptr = GA->getAliasee();
1625     } else {
1626       break;
1627     }
1628     assert(Ptr->getType()->isPointerTy() && "Unexpected operand type!");
1629   } while (Visited.insert(Ptr).second);
1630 
1631   if (!OffsetPtr) {
1632     if (!Int8Ptr) {
1633       Int8Ptr = IRB.CreateBitCast(
1634           Ptr, IRB.getInt8PtrTy(PointerTy->getPointerAddressSpace()),
1635           NamePrefix + "sroa_raw_cast");
1636       Int8PtrOffset = Offset;
1637     }
1638 
1639     OffsetPtr = Int8PtrOffset == 0
1640                     ? Int8Ptr
1641                     : IRB.CreateInBoundsGEP(IRB.getInt8Ty(), Int8Ptr,
1642                                             IRB.getInt(Int8PtrOffset),
1643                                             NamePrefix + "sroa_raw_idx");
1644   }
1645   Ptr = OffsetPtr;
1646 
1647   // On the off chance we were targeting i8*, guard the bitcast here.
1648   if (cast<PointerType>(Ptr->getType()) != TargetPtrTy) {
1649     Ptr = IRB.CreatePointerBitCastOrAddrSpaceCast(Ptr,
1650                                                   TargetPtrTy,
1651                                                   NamePrefix + "sroa_cast");
1652   }
1653 
1654   return Ptr;
1655 }
1656 
1657 /// Compute the adjusted alignment for a load or store from an offset.
1658 static Align getAdjustedAlignment(Instruction *I, uint64_t Offset) {
1659   return commonAlignment(getLoadStoreAlignment(I), Offset);
1660 }
1661 
1662 /// Test whether we can convert a value from the old to the new type.
1663 ///
1664 /// This predicate should be used to guard calls to convertValue in order to
1665 /// ensure that we only try to convert viable values. The strategy is that we
1666 /// will peel off single element struct and array wrappings to get to an
1667 /// underlying value, and convert that value.
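///
/// For example (illustrative), an i64 and a 64-bit pointer in an integral
/// address space are inter-convertible, whereas i32 and i64 are not because
/// their bit widths differ, and first-class aggregates are never converted.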
1668 static bool canConvertValue(const DataLayout &DL, Type *OldTy, Type *NewTy) {
1669   if (OldTy == NewTy)
1670     return true;
1671 
1672   // For integer types, we can't handle any bit-width differences. This would
1673   // break both vector conversions with extension and introduce endianness
1674   // issues when in conjunction with loads and stores.
1675   if (isa<IntegerType>(OldTy) && isa<IntegerType>(NewTy)) {
1676     assert(cast<IntegerType>(OldTy)->getBitWidth() !=
1677                cast<IntegerType>(NewTy)->getBitWidth() &&
1678            "We can't have the same bitwidth for different int types");
1679     return false;
1680   }
1681 
1682   if (DL.getTypeSizeInBits(NewTy).getFixedSize() !=
1683       DL.getTypeSizeInBits(OldTy).getFixedSize())
1684     return false;
1685   if (!NewTy->isSingleValueType() || !OldTy->isSingleValueType())
1686     return false;
1687 
1688   // We can convert pointers to integers and vice-versa. Same for vectors
1689   // of pointers and integers.
1690   OldTy = OldTy->getScalarType();
1691   NewTy = NewTy->getScalarType();
1692   if (NewTy->isPointerTy() || OldTy->isPointerTy()) {
1693     if (NewTy->isPointerTy() && OldTy->isPointerTy()) {
1694       unsigned OldAS = OldTy->getPointerAddressSpace();
1695       unsigned NewAS = NewTy->getPointerAddressSpace();
1696       // Convert pointers if they are pointers from the same address space or
1697       // different integral (not non-integral) address spaces with the same
1698       // pointer size.
1699       return OldAS == NewAS ||
1700              (!DL.isNonIntegralAddressSpace(OldAS) &&
1701               !DL.isNonIntegralAddressSpace(NewAS) &&
1702               DL.getPointerSize(OldAS) == DL.getPointerSize(NewAS));
1703     }
1704 
1705     // We can convert integers to integral pointers, but not to non-integral
1706     // pointers.
1707     if (OldTy->isIntegerTy())
1708       return !DL.isNonIntegralPointerType(NewTy);
1709 
1710     // We can convert integral pointers to integers, but non-integral pointers
1711     // need to remain pointers.
1712     if (!DL.isNonIntegralPointerType(OldTy))
1713       return NewTy->isIntegerTy();
1714 
1715     return false;
1716   }
1717 
1718   return true;
1719 }
1720 
1721 /// Generic routine to convert an SSA value to a value of a different
1722 /// type.
1723 ///
1724 /// This will try various different casting techniques, such as bitcasts,
1725 /// inttoptr, and ptrtoint casts. Use the \c canConvertValue predicate to test
1726 /// two types for viability with this routine.
1727 static Value *convertValue(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
1728                            Type *NewTy) {
1729   Type *OldTy = V->getType();
1730   assert(canConvertValue(DL, OldTy, NewTy) && "Value not convertable to type");
1731 
1732   if (OldTy == NewTy)
1733     return V;
1734 
1735   assert(!(isa<IntegerType>(OldTy) && isa<IntegerType>(NewTy)) &&
1736          "Integer types must be the exact same to convert.");
1737 
1738   // See if we need inttoptr for this type pair. May require additional bitcast.
1739   if (OldTy->isIntOrIntVectorTy() && NewTy->isPtrOrPtrVectorTy()) {
1740     // Expand <2 x i32> to i8* --> <2 x i32> to i64 to i8*
1741     // Expand i128 to <2 x i8*> --> i128 to <2 x i64> to <2 x i8*>
1742     // Expand <4 x i32> to <2 x i8*> --> <4 x i32> to <2 x i64> to <2 x i8*>
1743     // Directly handle i64 to i8*
1744     return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)),
1745                               NewTy);
1746   }
1747 
1748   // See if we need ptrtoint for this type pair. May require additional bitcast.
1749   if (OldTy->isPtrOrPtrVectorTy() && NewTy->isIntOrIntVectorTy()) {
1750     // Expand <2 x i8*> to i128 --> <2 x i8*> to <2 x i64> to i128
1751     // Expand i8* to <2 x i32> --> i8* to i64 to <2 x i32>
1752     // Expand <2 x i8*> to <4 x i32> --> <2 x i8*> to <2 x i64> to <4 x i32>
1753     // Expand i8* to i64 --> i8* to i64 to i64
1754     return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)),
1755                              NewTy);
1756   }
1757 
1758   if (OldTy->isPtrOrPtrVectorTy() && NewTy->isPtrOrPtrVectorTy()) {
1759     unsigned OldAS = OldTy->getPointerAddressSpace();
1760     unsigned NewAS = NewTy->getPointerAddressSpace();
    // To convert pointers between different address spaces (which have already
    // been checked to be convertible, i.e. they have the same pointer size),
    // we cannot use `bitcast` (which requires the source and destination to be
    // in the same address space) or `addrspacecast` (which is not always a
    // no-op cast). Instead, use a pair of no-op `ptrtoint`/`inttoptr` casts
    // through an integer with the same bit width.
1767     if (OldAS != NewAS) {
1768       assert(DL.getPointerSize(OldAS) == DL.getPointerSize(NewAS));
1769       return IRB.CreateIntToPtr(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)),
1770                                 NewTy);
1771     }
1772   }
1773 
1774   return IRB.CreateBitCast(V, NewTy);
1775 }
1776 
1777 /// Test whether the given slice use can be promoted to a vector.
1778 ///
1779 /// This function is called to test each entry in a partition which is slated
1780 /// for a single slice.
1781 static bool isVectorPromotionViableForSlice(Partition &P, const Slice &S,
1782                                             VectorType *Ty,
1783                                             uint64_t ElementSize,
1784                                             const DataLayout &DL) {
1785   // First validate the slice offsets.
1786   uint64_t BeginOffset =
1787       std::max(S.beginOffset(), P.beginOffset()) - P.beginOffset();
1788   uint64_t BeginIndex = BeginOffset / ElementSize;
1789   if (BeginIndex * ElementSize != BeginOffset ||
1790       BeginIndex >= cast<FixedVectorType>(Ty)->getNumElements())
1791     return false;
1792   uint64_t EndOffset =
1793       std::min(S.endOffset(), P.endOffset()) - P.beginOffset();
1794   uint64_t EndIndex = EndOffset / ElementSize;
1795   if (EndIndex * ElementSize != EndOffset ||
1796       EndIndex > cast<FixedVectorType>(Ty)->getNumElements())
1797     return false;
1798 
1799   assert(EndIndex > BeginIndex && "Empty vector!");
1800   uint64_t NumElements = EndIndex - BeginIndex;
1801   Type *SliceTy = (NumElements == 1)
1802                       ? Ty->getElementType()
1803                       : FixedVectorType::get(Ty->getElementType(), NumElements);
1804 
1805   Type *SplitIntTy =
1806       Type::getIntNTy(Ty->getContext(), NumElements * ElementSize * 8);
1807 
1808   Use *U = S.getUse();
1809 
1810   if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) {
1811     if (MI->isVolatile())
1812       return false;
1813     if (!S.isSplittable())
1814       return false; // Skip any unsplittable intrinsics.
1815   } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) {
1816     if (!II->isLifetimeStartOrEnd() && !II->isDroppable())
1817       return false;
1818   } else if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
1819     if (LI->isVolatile())
1820       return false;
1821     Type *LTy = LI->getType();
1822     // Disable vector promotion when there are loads or stores of an FCA.
1823     if (LTy->isStructTy())
1824       return false;
1825     if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) {
1826       assert(LTy->isIntegerTy());
1827       LTy = SplitIntTy;
1828     }
1829     if (!canConvertValue(DL, SliceTy, LTy))
1830       return false;
1831   } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
1832     if (SI->isVolatile())
1833       return false;
1834     Type *STy = SI->getValueOperand()->getType();
1835     // Disable vector promotion when there are loads or stores of an FCA.
1836     if (STy->isStructTy())
1837       return false;
1838     if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) {
1839       assert(STy->isIntegerTy());
1840       STy = SplitIntTy;
1841     }
1842     if (!canConvertValue(DL, STy, SliceTy))
1843       return false;
1844   } else {
1845     return false;
1846   }
1847 
1848   return true;
1849 }
1850 
1851 /// Test whether the given alloca partitioning and range of slices can be
1852 /// promoted to a vector.
1853 ///
1854 /// This is a quick test to check whether we can rewrite a particular alloca
1855 /// partition (and its newly formed alloca) into a vector alloca with only
1856 /// whole-vector loads and stores such that it could be promoted to a vector
1857 /// SSA value. We only can ensure this for a limited set of operations, and we
1858 /// don't want to do the rewrites unless we are confident that the result will
1859 /// be promotable, so we have an early test here.
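///
/// For example (illustrative), a partition accessed only by whole-vector
/// loads and stores of <4 x i32> together with element-aligned i32 accesses
/// can be rewritten against a <4 x i32> alloca, whereas an access that
/// straddles an element boundary defeats the rewrite.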
1860 static VectorType *isVectorPromotionViable(Partition &P, const DataLayout &DL) {
1861   // Collect the candidate types for vector-based promotion. Also track whether
1862   // we have different element types.
1863   SmallVector<VectorType *, 4> CandidateTys;
1864   Type *CommonEltTy = nullptr;
1865   bool HaveCommonEltTy = true;
1866   auto CheckCandidateType = [&](Type *Ty) {
1867     if (auto *VTy = dyn_cast<VectorType>(Ty)) {
      // If this vector type's total size in bits differs from the candidates
      // already collected, give up on vector promotion: clear the candidate
      // list and return.
1869       if (!CandidateTys.empty()) {
1870         VectorType *V = CandidateTys[0];
1871         if (DL.getTypeSizeInBits(VTy).getFixedSize() !=
1872             DL.getTypeSizeInBits(V).getFixedSize()) {
1873           CandidateTys.clear();
1874           return;
1875         }
1876       }
1877       CandidateTys.push_back(VTy);
1878       if (!CommonEltTy)
1879         CommonEltTy = VTy->getElementType();
1880       else if (CommonEltTy != VTy->getElementType())
1881         HaveCommonEltTy = false;
1882     }
1883   };
1884   // Consider any loads or stores that are the exact size of the slice.
1885   for (const Slice &S : P)
1886     if (S.beginOffset() == P.beginOffset() &&
1887         S.endOffset() == P.endOffset()) {
1888       if (auto *LI = dyn_cast<LoadInst>(S.getUse()->getUser()))
1889         CheckCandidateType(LI->getType());
1890       else if (auto *SI = dyn_cast<StoreInst>(S.getUse()->getUser()))
1891         CheckCandidateType(SI->getValueOperand()->getType());
1892     }
1893 
1894   // If we didn't find a vector type, nothing to do here.
1895   if (CandidateTys.empty())
1896     return nullptr;
1897 
1898   // Remove non-integer vector types if we had multiple common element types.
1899   // FIXME: It'd be nice to replace them with integer vector types, but we can't
1900   // do that until all the backends are known to produce good code for all
1901   // integer vector types.
1902   if (!HaveCommonEltTy) {
1903     llvm::erase_if(CandidateTys, [](VectorType *VTy) {
1904       return !VTy->getElementType()->isIntegerTy();
1905     });
1906 
1907     // If there were no integer vector types, give up.
1908     if (CandidateTys.empty())
1909       return nullptr;
1910 
1911     // Rank the remaining candidate vector types. This is easy because we know
1912     // they're all integer vectors. We sort by ascending number of elements.
1913     auto RankVectorTypes = [&DL](VectorType *RHSTy, VectorType *LHSTy) {
1914       (void)DL;
1915       assert(DL.getTypeSizeInBits(RHSTy).getFixedSize() ==
1916                  DL.getTypeSizeInBits(LHSTy).getFixedSize() &&
1917              "Cannot have vector types of different sizes!");
1918       assert(RHSTy->getElementType()->isIntegerTy() &&
1919              "All non-integer types eliminated!");
1920       assert(LHSTy->getElementType()->isIntegerTy() &&
1921              "All non-integer types eliminated!");
1922       return cast<FixedVectorType>(RHSTy)->getNumElements() <
1923              cast<FixedVectorType>(LHSTy)->getNumElements();
1924     };
1925     llvm::sort(CandidateTys, RankVectorTypes);
1926     CandidateTys.erase(
1927         std::unique(CandidateTys.begin(), CandidateTys.end(), RankVectorTypes),
1928         CandidateTys.end());
1929   } else {
1930 // The only way to have the same element type in every vector type is to
1931 // have the same vector type. Check that and remove all but one.
1932 #ifndef NDEBUG
1933     for (VectorType *VTy : CandidateTys) {
1934       assert(VTy->getElementType() == CommonEltTy &&
1935              "Unaccounted for element type!");
1936       assert(VTy == CandidateTys[0] &&
1937              "Different vector types with the same element type!");
1938     }
1939 #endif
1940     CandidateTys.resize(1);
1941   }
1942 
1943   // Try each vector type, and return the one which works.
1944   auto CheckVectorTypeForPromotion = [&](VectorType *VTy) {
1945     uint64_t ElementSize =
1946         DL.getTypeSizeInBits(VTy->getElementType()).getFixedSize();
1947 
    // Although LLVM vectors are defined to be bit-packed, we don't support
    // element sizes that aren't a whole number of bytes.
1950     if (ElementSize % 8)
1951       return false;
1952     assert((DL.getTypeSizeInBits(VTy).getFixedSize() % 8) == 0 &&
1953            "vector size not a multiple of element size?");
1954     ElementSize /= 8;
1955 
1956     for (const Slice &S : P)
1957       if (!isVectorPromotionViableForSlice(P, S, VTy, ElementSize, DL))
1958         return false;
1959 
1960     for (const Slice *S : P.splitSliceTails())
1961       if (!isVectorPromotionViableForSlice(P, *S, VTy, ElementSize, DL))
1962         return false;
1963 
1964     return true;
1965   };
1966   for (VectorType *VTy : CandidateTys)
1967     if (CheckVectorTypeForPromotion(VTy))
1968       return VTy;
1969 
1970   return nullptr;
1971 }
1972 
1973 /// Test whether a slice of an alloca is valid for integer widening.
1974 ///
1975 /// This implements the necessary checking for the \c isIntegerWideningViable
1976 /// test below on a single slice of the alloca.
1977 static bool isIntegerWideningViableForSlice(const Slice &S,
1978                                             uint64_t AllocBeginOffset,
1979                                             Type *AllocaTy,
1980                                             const DataLayout &DL,
1981                                             bool &WholeAllocaOp) {
1982   uint64_t Size = DL.getTypeStoreSize(AllocaTy).getFixedSize();
1983 
1984   uint64_t RelBegin = S.beginOffset() - AllocBeginOffset;
1985   uint64_t RelEnd = S.endOffset() - AllocBeginOffset;
1986 
1987   // We can't reasonably handle cases where the load or store extends past
1988   // the end of the alloca's type and into its padding.
1989   if (RelEnd > Size)
1990     return false;
1991 
1992   Use *U = S.getUse();
1993 
1994   if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
1995     if (LI->isVolatile())
1996       return false;
1997     // We can't handle loads that extend past the allocated memory.
1998     if (DL.getTypeStoreSize(LI->getType()).getFixedSize() > Size)
1999       return false;
2000     // So far, AllocaSliceRewriter does not support widening split slice tails
2001     // in rewriteIntegerLoad.
2002     if (S.beginOffset() < AllocBeginOffset)
2003       return false;
2004     // Note that we don't count vector loads or stores as whole-alloca
2005     // operations which enable integer widening because we would prefer to use
2006     // vector widening instead.
2007     if (!isa<VectorType>(LI->getType()) && RelBegin == 0 && RelEnd == Size)
2008       WholeAllocaOp = true;
2009     if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType())) {
2010       if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy).getFixedSize())
2011         return false;
2012     } else if (RelBegin != 0 || RelEnd != Size ||
2013                !canConvertValue(DL, AllocaTy, LI->getType())) {
2014       // Non-integer loads need to be convertible from the alloca type so that
2015       // they are promotable.
2016       return false;
2017     }
2018   } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
2019     Type *ValueTy = SI->getValueOperand()->getType();
2020     if (SI->isVolatile())
2021       return false;
2022     // We can't handle stores that extend past the allocated memory.
2023     if (DL.getTypeStoreSize(ValueTy).getFixedSize() > Size)
2024       return false;
2025     // So far, AllocaSliceRewriter does not support widening split slice tails
2026     // in rewriteIntegerStore.
2027     if (S.beginOffset() < AllocBeginOffset)
2028       return false;
2029     // Note that we don't count vector loads or stores as whole-alloca
2030     // operations which enable integer widening because we would prefer to use
2031     // vector widening instead.
2032     if (!isa<VectorType>(ValueTy) && RelBegin == 0 && RelEnd == Size)
2033       WholeAllocaOp = true;
2034     if (IntegerType *ITy = dyn_cast<IntegerType>(ValueTy)) {
2035       if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy).getFixedSize())
2036         return false;
2037     } else if (RelBegin != 0 || RelEnd != Size ||
2038                !canConvertValue(DL, ValueTy, AllocaTy)) {
2039       // Non-integer stores need to be convertible to the alloca type so that
2040       // they are promotable.
2041       return false;
2042     }
2043   } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) {
2044     if (MI->isVolatile() || !isa<Constant>(MI->getLength()))
2045       return false;
2046     if (!S.isSplittable())
2047       return false; // Skip any unsplittable intrinsics.
2048   } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) {
2049     if (!II->isLifetimeStartOrEnd() && !II->isDroppable())
2050       return false;
2051   } else {
2052     return false;
2053   }
2054 
2055   return true;
2056 }
2057 
2058 /// Test whether the given alloca partition's integer operations can be
2059 /// widened to promotable ones.
2060 ///
2061 /// This is a quick test to check whether we can rewrite the integer loads and
2062 /// stores to a particular alloca into wider loads and stores and be able to
2063 /// promote the resulting alloca.
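///
/// For example (illustrative), an i64 alloca with one covering i64 load and
/// several i32 loads and stores of its low and high halves can have the
/// narrow accesses rewritten as shift-and-mask operations on the full i64
/// and then be promoted.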
2064 static bool isIntegerWideningViable(Partition &P, Type *AllocaTy,
2065                                     const DataLayout &DL) {
2066   uint64_t SizeInBits = DL.getTypeSizeInBits(AllocaTy).getFixedSize();
2067   // Don't create integer types larger than the maximum bitwidth.
2068   if (SizeInBits > IntegerType::MAX_INT_BITS)
2069     return false;
2070 
2071   // Don't try to handle allocas with bit-padding.
2072   if (SizeInBits != DL.getTypeStoreSizeInBits(AllocaTy).getFixedSize())
2073     return false;
2074 
2075   // We need to ensure that an integer type with the appropriate bitwidth can
2076   // be converted to the alloca type, whatever that is. We don't want to force
2077   // the alloca itself to have an integer type if there is a more suitable one.
2078   Type *IntTy = Type::getIntNTy(AllocaTy->getContext(), SizeInBits);
2079   if (!canConvertValue(DL, AllocaTy, IntTy) ||
2080       !canConvertValue(DL, IntTy, AllocaTy))
2081     return false;
2082 
2083   // While examining uses, we ensure that the alloca has a covering load or
2084   // store. We don't want to widen the integer operations only to fail to
2085   // promote due to some other unsplittable entry (which we may make splittable
2086   // later). However, if there are only splittable uses, go ahead and assume
2087   // that we cover the alloca.
2088   // FIXME: We shouldn't consider split slices that happen to start in the
2089   // partition here...
2090   bool WholeAllocaOp = P.empty() && DL.isLegalInteger(SizeInBits);
2091 
2092   for (const Slice &S : P)
2093     if (!isIntegerWideningViableForSlice(S, P.beginOffset(), AllocaTy, DL,
2094                                          WholeAllocaOp))
2095       return false;
2096 
2097   for (const Slice *S : P.splitSliceTails())
2098     if (!isIntegerWideningViableForSlice(*S, P.beginOffset(), AllocaTy, DL,
2099                                          WholeAllocaOp))
2100       return false;
2101 
2102   return WholeAllocaOp;
2103 }
2104 
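/// Extract an integer of type \p Ty from byte offset \p Offset of the wider
/// integer \p V, accounting for the target's endianness.
///
/// For example (illustrative, little-endian), extracting an i16 at byte
/// offset 2 of an i64 emits a logical shift right by 16 bits followed by a
/// truncation to i16; on a big-endian target the shift amount is instead
/// measured from the most significant end.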
2105 static Value *extractInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
2106                              IntegerType *Ty, uint64_t Offset,
2107                              const Twine &Name) {
2108   LLVM_DEBUG(dbgs() << "       start: " << *V << "\n");
2109   IntegerType *IntTy = cast<IntegerType>(V->getType());
2110   assert(DL.getTypeStoreSize(Ty).getFixedSize() + Offset <=
2111              DL.getTypeStoreSize(IntTy).getFixedSize() &&
2112          "Element extends past full value");
2113   uint64_t ShAmt = 8 * Offset;
2114   if (DL.isBigEndian())
2115     ShAmt = 8 * (DL.getTypeStoreSize(IntTy).getFixedSize() -
2116                  DL.getTypeStoreSize(Ty).getFixedSize() - Offset);
2117   if (ShAmt) {
2118     V = IRB.CreateLShr(V, ShAmt, Name + ".shift");
2119     LLVM_DEBUG(dbgs() << "     shifted: " << *V << "\n");
2120   }
2121   assert(Ty->getBitWidth() <= IntTy->getBitWidth() &&
2122          "Cannot extract to a larger integer!");
2123   if (Ty != IntTy) {
2124     V = IRB.CreateTrunc(V, Ty, Name + ".trunc");
2125     LLVM_DEBUG(dbgs() << "     trunced: " << *V << "\n");
2126   }
2127   return V;
2128 }
2129 
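/// Insert the integer \p V into byte offset \p Offset of the wider integer
/// \p Old, accounting for the target's endianness.
///
/// For example (illustrative, little-endian), inserting an i16 at byte
/// offset 2 of an i64 zero-extends the value, shifts it left by 16 bits,
/// masks those bits out of the old value, and ORs the two together.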
2130 static Value *insertInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *Old,
2131                             Value *V, uint64_t Offset, const Twine &Name) {
2132   IntegerType *IntTy = cast<IntegerType>(Old->getType());
2133   IntegerType *Ty = cast<IntegerType>(V->getType());
2134   assert(Ty->getBitWidth() <= IntTy->getBitWidth() &&
2135          "Cannot insert a larger integer!");
2136   LLVM_DEBUG(dbgs() << "       start: " << *V << "\n");
2137   if (Ty != IntTy) {
2138     V = IRB.CreateZExt(V, IntTy, Name + ".ext");
2139     LLVM_DEBUG(dbgs() << "    extended: " << *V << "\n");
2140   }
2141   assert(DL.getTypeStoreSize(Ty).getFixedSize() + Offset <=
2142              DL.getTypeStoreSize(IntTy).getFixedSize() &&
2143          "Element store outside of alloca store");
2144   uint64_t ShAmt = 8 * Offset;
2145   if (DL.isBigEndian())
2146     ShAmt = 8 * (DL.getTypeStoreSize(IntTy).getFixedSize() -
2147                  DL.getTypeStoreSize(Ty).getFixedSize() - Offset);
2148   if (ShAmt) {
2149     V = IRB.CreateShl(V, ShAmt, Name + ".shift");
2150     LLVM_DEBUG(dbgs() << "     shifted: " << *V << "\n");
2151   }
2152 
2153   if (ShAmt || Ty->getBitWidth() < IntTy->getBitWidth()) {
2154     APInt Mask = ~Ty->getMask().zext(IntTy->getBitWidth()).shl(ShAmt);
2155     Old = IRB.CreateAnd(Old, Mask, Name + ".mask");
2156     LLVM_DEBUG(dbgs() << "      masked: " << *Old << "\n");
2157     V = IRB.CreateOr(Old, V, Name + ".insert");
2158     LLVM_DEBUG(dbgs() << "    inserted: " << *V << "\n");
2159   }
2160   return V;
2161 }
2162 
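/// Extract the elements in the range [\p BeginIndex, \p EndIndex) from the
/// vector \p V.
///
/// A whole-vector extract returns \p V unchanged and a single element uses
/// extractelement; anything in between uses a shufflevector. For example
/// (illustrative), extracting elements 1 and 2 of a <4 x i32> emits a
/// shufflevector with the mask <1, 2>.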
2163 static Value *extractVector(IRBuilderTy &IRB, Value *V, unsigned BeginIndex,
2164                             unsigned EndIndex, const Twine &Name) {
2165   auto *VecTy = cast<FixedVectorType>(V->getType());
2166   unsigned NumElements = EndIndex - BeginIndex;
2167   assert(NumElements <= VecTy->getNumElements() && "Too many elements!");
2168 
2169   if (NumElements == VecTy->getNumElements())
2170     return V;
2171 
2172   if (NumElements == 1) {
2173     V = IRB.CreateExtractElement(V, IRB.getInt32(BeginIndex),
2174                                  Name + ".extract");
2175     LLVM_DEBUG(dbgs() << "     extract: " << *V << "\n");
2176     return V;
2177   }
2178 
2179   SmallVector<int, 8> Mask;
2180   Mask.reserve(NumElements);
2181   for (unsigned i = BeginIndex; i != EndIndex; ++i)
2182     Mask.push_back(i);
2183   V = IRB.CreateShuffleVector(V, Mask, Name + ".extract");
2184   LLVM_DEBUG(dbgs() << "     shuffle: " << *V << "\n");
2185   return V;
2186 }
2187 
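/// Insert \p V (a scalar or a narrower vector) into \p Old starting at
/// element \p BeginIndex.
///
/// A scalar uses insertelement. A narrower vector is first widened by a
/// shufflevector that places its elements at the target positions and then
/// blended with \p Old via a select on a constant mask. For example
/// (illustrative), inserting a <2 x i32> at index 1 of a <4 x i32> blends
/// with the select mask <false, true, true, false>.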
2188 static Value *insertVector(IRBuilderTy &IRB, Value *Old, Value *V,
2189                            unsigned BeginIndex, const Twine &Name) {
2190   VectorType *VecTy = cast<VectorType>(Old->getType());
2191   assert(VecTy && "Can only insert a vector into a vector");
2192 
2193   VectorType *Ty = dyn_cast<VectorType>(V->getType());
2194   if (!Ty) {
2195     // Single element to insert.
2196     V = IRB.CreateInsertElement(Old, V, IRB.getInt32(BeginIndex),
2197                                 Name + ".insert");
2198     LLVM_DEBUG(dbgs() << "     insert: " << *V << "\n");
2199     return V;
2200   }
2201 
2202   assert(cast<FixedVectorType>(Ty)->getNumElements() <=
2203              cast<FixedVectorType>(VecTy)->getNumElements() &&
2204          "Too many elements!");
2205   if (cast<FixedVectorType>(Ty)->getNumElements() ==
2206       cast<FixedVectorType>(VecTy)->getNumElements()) {
2207     assert(V->getType() == VecTy && "Vector type mismatch");
2208     return V;
2209   }
2210   unsigned EndIndex = BeginIndex + cast<FixedVectorType>(Ty)->getNumElements();
2211 
  // When inserting a smaller vector into the larger one, we first use a
  // shuffle vector to widen it with undef elements, and then use a select
  // with a constant mask to choose between the widened incoming vector and
  // the loaded (old) vector.
2216   SmallVector<int, 8> Mask;
2217   Mask.reserve(cast<FixedVectorType>(VecTy)->getNumElements());
2218   for (unsigned i = 0; i != cast<FixedVectorType>(VecTy)->getNumElements(); ++i)
2219     if (i >= BeginIndex && i < EndIndex)
2220       Mask.push_back(i - BeginIndex);
2221     else
2222       Mask.push_back(-1);
2223   V = IRB.CreateShuffleVector(V, Mask, Name + ".expand");
2224   LLVM_DEBUG(dbgs() << "    shuffle: " << *V << "\n");
2225 
2226   SmallVector<Constant *, 8> Mask2;
2227   Mask2.reserve(cast<FixedVectorType>(VecTy)->getNumElements());
2228   for (unsigned i = 0; i != cast<FixedVectorType>(VecTy)->getNumElements(); ++i)
2229     Mask2.push_back(IRB.getInt1(i >= BeginIndex && i < EndIndex));
2230 
2231   V = IRB.CreateSelect(ConstantVector::get(Mask2), V, Old, Name + "blend");
2232 
2233   LLVM_DEBUG(dbgs() << "    blend: " << *V << "\n");
2234   return V;
2235 }
2236 
/// Visitor to rewrite instructions using a particular slice of an alloca
/// to use a new alloca.
2239 ///
2240 /// Also implements the rewriting to vector-based accesses when the partition
2241 /// passes the isVectorPromotionViable predicate. Most of the rewriting logic
2242 /// lives here.
2243 class llvm::sroa::AllocaSliceRewriter
2244     : public InstVisitor<AllocaSliceRewriter, bool> {
2245   // Befriend the base class so it can delegate to private visit methods.
2246   friend class InstVisitor<AllocaSliceRewriter, bool>;
2247 
2248   using Base = InstVisitor<AllocaSliceRewriter, bool>;
2249 
2250   const DataLayout &DL;
2251   AllocaSlices &AS;
2252   SROAPass &Pass;
2253   AllocaInst &OldAI, &NewAI;
2254   const uint64_t NewAllocaBeginOffset, NewAllocaEndOffset;
2255   Type *NewAllocaTy;
2256 
2257   // This is a convenience and flag variable that will be null unless the new
2258   // alloca's integer operations should be widened to this integer type due to
2259   // passing isIntegerWideningViable above. If it is non-null, the desired
2260   // integer type will be stored here for easy access during rewriting.
2261   IntegerType *IntTy;
2262 
2263   // If we are rewriting an alloca partition which can be written as pure
2264   // vector operations, we stash extra information here. When VecTy is
2265   // non-null, we have some strict guarantees about the rewritten alloca:
2266   //   - The new alloca is exactly the size of the vector type here.
2267   //   - The accesses all either map to the entire vector or to a single
2268   //     element.
2269   //   - The set of accessing instructions is only one of those handled above
2270   //     in isVectorPromotionViable. Generally these are the same access kinds
2271   //     which are promotable via mem2reg.
2272   VectorType *VecTy;
2273   Type *ElementTy;
2274   uint64_t ElementSize;
2275 
2276   // The original offset of the slice currently being rewritten relative to
2277   // the original alloca.
2278   uint64_t BeginOffset = 0;
2279   uint64_t EndOffset = 0;
2280 
2281   // The new offsets of the slice currently being rewritten relative to the
2282   // original alloca.
2283   uint64_t NewBeginOffset = 0, NewEndOffset = 0;
2284 
2285   uint64_t SliceSize = 0;
2286   bool IsSplittable = false;
2287   bool IsSplit = false;
2288   Use *OldUse = nullptr;
2289   Instruction *OldPtr = nullptr;
2290 
2291   // Track post-rewrite users which are PHI nodes and Selects.
2292   SmallSetVector<PHINode *, 8> &PHIUsers;
2293   SmallSetVector<SelectInst *, 8> &SelectUsers;
2294 
  // Utility IR builder whose name prefix is set up for each visited use, and
  // whose insertion point is set to point to the user.
2297   IRBuilderTy IRB;
2298 
2299 public:
2300   AllocaSliceRewriter(const DataLayout &DL, AllocaSlices &AS, SROAPass &Pass,
2301                       AllocaInst &OldAI, AllocaInst &NewAI,
2302                       uint64_t NewAllocaBeginOffset,
2303                       uint64_t NewAllocaEndOffset, bool IsIntegerPromotable,
2304                       VectorType *PromotableVecTy,
2305                       SmallSetVector<PHINode *, 8> &PHIUsers,
2306                       SmallSetVector<SelectInst *, 8> &SelectUsers)
2307       : DL(DL), AS(AS), Pass(Pass), OldAI(OldAI), NewAI(NewAI),
2308         NewAllocaBeginOffset(NewAllocaBeginOffset),
2309         NewAllocaEndOffset(NewAllocaEndOffset),
2310         NewAllocaTy(NewAI.getAllocatedType()),
2311         IntTy(
2312             IsIntegerPromotable
2313                 ? Type::getIntNTy(NewAI.getContext(),
2314                                   DL.getTypeSizeInBits(NewAI.getAllocatedType())
2315                                       .getFixedSize())
2316                 : nullptr),
2317         VecTy(PromotableVecTy),
2318         ElementTy(VecTy ? VecTy->getElementType() : nullptr),
2319         ElementSize(VecTy ? DL.getTypeSizeInBits(ElementTy).getFixedSize() / 8
2320                           : 0),
2321         PHIUsers(PHIUsers), SelectUsers(SelectUsers),
2322         IRB(NewAI.getContext(), ConstantFolder()) {
2323     if (VecTy) {
2324       assert((DL.getTypeSizeInBits(ElementTy).getFixedSize() % 8) == 0 &&
2325              "Only multiple-of-8 sized vector elements are viable");
2326       ++NumVectorized;
2327     }
2328     assert((!IntTy && !VecTy) || (IntTy && !VecTy) || (!IntTy && VecTy));
2329   }
2330 
2331   bool visit(AllocaSlices::const_iterator I) {
2332     bool CanSROA = true;
2333     BeginOffset = I->beginOffset();
2334     EndOffset = I->endOffset();
2335     IsSplittable = I->isSplittable();
2336     IsSplit =
2337         BeginOffset < NewAllocaBeginOffset || EndOffset > NewAllocaEndOffset;
2338     LLVM_DEBUG(dbgs() << "  rewriting " << (IsSplit ? "split " : ""));
2339     LLVM_DEBUG(AS.printSlice(dbgs(), I, ""));
2340     LLVM_DEBUG(dbgs() << "\n");
2341 
2342     // Compute the intersecting offset range.
2343     assert(BeginOffset < NewAllocaEndOffset);
2344     assert(EndOffset > NewAllocaBeginOffset);
2345     NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
2346     NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
2347 
2348     SliceSize = NewEndOffset - NewBeginOffset;
2349 
2350     OldUse = I->getUse();
2351     OldPtr = cast<Instruction>(OldUse->get());
2352 
2353     Instruction *OldUserI = cast<Instruction>(OldUse->getUser());
2354     IRB.SetInsertPoint(OldUserI);
2355     IRB.SetCurrentDebugLocation(OldUserI->getDebugLoc());
2356     IRB.getInserter().SetNamePrefix(
2357         Twine(NewAI.getName()) + "." + Twine(BeginOffset) + ".");
2358 
2359     CanSROA &= visit(cast<Instruction>(OldUse->getUser()));
2360     if (VecTy || IntTy)
2361       assert(CanSROA);
2362     return CanSROA;
2363   }
2364 
2365 private:
2366   // Make sure the other visit overloads are visible.
2367   using Base::visit;
2368 
2369   // Every instruction which can end up as a user must have a rewrite rule.
2370   bool visitInstruction(Instruction &I) {
2371     LLVM_DEBUG(dbgs() << "    !!!! Cannot rewrite: " << I << "\n");
2372     llvm_unreachable("No rewrite rule for this instruction!");
2373   }
2374 
2375   Value *getNewAllocaSlicePtr(IRBuilderTy &IRB, Type *PointerTy) {
2376     // Note that the offset computation can use BeginOffset or NewBeginOffset
2377     // interchangeably for unsplit slices.
2378     assert(IsSplit || BeginOffset == NewBeginOffset);
2379     uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
2380 
2381 #ifndef NDEBUG
2382     StringRef OldName = OldPtr->getName();
2383     // Skip through the last '.sroa.' component of the name.
2384     size_t LastSROAPrefix = OldName.rfind(".sroa.");
2385     if (LastSROAPrefix != StringRef::npos) {
2386       OldName = OldName.substr(LastSROAPrefix + strlen(".sroa."));
2387       // Look for an SROA slice index.
2388       size_t IndexEnd = OldName.find_first_not_of("0123456789");
2389       if (IndexEnd != StringRef::npos && OldName[IndexEnd] == '.') {
2390         // Strip the index and look for the offset.
2391         OldName = OldName.substr(IndexEnd + 1);
2392         size_t OffsetEnd = OldName.find_first_not_of("0123456789");
2393         if (OffsetEnd != StringRef::npos && OldName[OffsetEnd] == '.')
2394           // Strip the offset.
2395           OldName = OldName.substr(OffsetEnd + 1);
2396       }
2397     }
2398     // Strip any SROA suffixes as well.
2399     OldName = OldName.substr(0, OldName.find(".sroa_"));
2400 #endif
2401 
2402     return getAdjustedPtr(IRB, DL, &NewAI,
2403                           APInt(DL.getIndexTypeSizeInBits(PointerTy), Offset),
2404                           PointerTy,
2405 #ifndef NDEBUG
2406                           Twine(OldName) + "."
2407 #else
2408                           Twine()
2409 #endif
2410                           );
2411   }
2412 
  /// Compute a suitable alignment for accessing this slice of the *new*
  /// alloca, derived from the new alloca's alignment and the slice's offset
  /// within it.
2418   Align getSliceAlign() {
2419     return commonAlignment(NewAI.getAlign(),
2420                            NewBeginOffset - NewAllocaBeginOffset);
2421   }
2422 
2423   unsigned getIndex(uint64_t Offset) {
2424     assert(VecTy && "Can only call getIndex when rewriting a vector");
2425     uint64_t RelOffset = Offset - NewAllocaBeginOffset;
2426     assert(RelOffset / ElementSize < UINT32_MAX && "Index out of bounds");
2427     uint32_t Index = RelOffset / ElementSize;
2428     assert(Index * ElementSize == RelOffset);
2429     return Index;
2430   }
2431 
2432   void deleteIfTriviallyDead(Value *V) {
2433     Instruction *I = cast<Instruction>(V);
2434     if (isInstructionTriviallyDead(I))
2435       Pass.DeadInsts.push_back(I);
2436   }
2437 
2438   Value *rewriteVectorizedLoadInst(LoadInst &LI) {
2439     unsigned BeginIndex = getIndex(NewBeginOffset);
2440     unsigned EndIndex = getIndex(NewEndOffset);
2441     assert(EndIndex > BeginIndex && "Empty vector!");
2442 
2443     LoadInst *Load = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
2444                                            NewAI.getAlign(), "load");
2445 
2446     Load->copyMetadata(LI, {LLVMContext::MD_mem_parallel_loop_access,
2447                             LLVMContext::MD_access_group});
2448     return extractVector(IRB, Load, BeginIndex, EndIndex, "vec");
2449   }
2450 
2451   Value *rewriteIntegerLoad(LoadInst &LI) {
2452     assert(IntTy && "We cannot insert an integer to the alloca");
2453     assert(!LI.isVolatile());
2454     Value *V = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
2455                                      NewAI.getAlign(), "load");
2456     V = convertValue(DL, IRB, V, IntTy);
2457     assert(NewBeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
2458     uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
2459     if (Offset > 0 || NewEndOffset < NewAllocaEndOffset) {
2460       IntegerType *ExtractTy = Type::getIntNTy(LI.getContext(), SliceSize * 8);
2461       V = extractInteger(DL, IRB, V, ExtractTy, Offset, "extract");
2462     }
2463     // It is possible that the extracted type is not the load type. This
2464     // happens if there is a load past the end of the alloca, and as
2465     // a consequence the slice is narrower but still a candidate for integer
2466     // lowering. To handle this case, we just zero extend the extracted
2467     // integer.
2468     assert(cast<IntegerType>(LI.getType())->getBitWidth() >= SliceSize * 8 &&
2469            "Can only handle an extract for an overly wide load");
2470     if (cast<IntegerType>(LI.getType())->getBitWidth() > SliceSize * 8)
2471       V = IRB.CreateZExt(V, LI.getType());
2472     return V;
2473   }
2474 
2475   bool visitLoadInst(LoadInst &LI) {
2476     LLVM_DEBUG(dbgs() << "    original: " << LI << "\n");
2477     Value *OldOp = LI.getOperand(0);
2478     assert(OldOp == OldPtr);
2479 
2480     AAMDNodes AATags = LI.getAAMetadata();
2481 
2482     unsigned AS = LI.getPointerAddressSpace();
2483 
2484     Type *TargetTy = IsSplit ? Type::getIntNTy(LI.getContext(), SliceSize * 8)
2485                              : LI.getType();
2486     const bool IsLoadPastEnd =
2487         DL.getTypeStoreSize(TargetTy).getFixedSize() > SliceSize;
2488     bool IsPtrAdjusted = false;
2489     Value *V;
2490     if (VecTy) {
2491       V = rewriteVectorizedLoadInst(LI);
2492     } else if (IntTy && LI.getType()->isIntegerTy()) {
2493       V = rewriteIntegerLoad(LI);
2494     } else if (NewBeginOffset == NewAllocaBeginOffset &&
2495                NewEndOffset == NewAllocaEndOffset &&
2496                (canConvertValue(DL, NewAllocaTy, TargetTy) ||
2497                 (IsLoadPastEnd && NewAllocaTy->isIntegerTy() &&
2498                  TargetTy->isIntegerTy()))) {
2499       LoadInst *NewLI = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
2500                                               NewAI.getAlign(), LI.isVolatile(),
2501                                               LI.getName());
2502       if (AATags)
2503         NewLI->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset));
2504       if (LI.isVolatile())
2505         NewLI->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
2506       if (NewLI->isAtomic())
2507         NewLI->setAlignment(LI.getAlign());
2508 
      // Any !nonnull metadata or !range metadata on the old load is also valid
      // on the new load. This holds in some cases even when the loads have
      // different types, for example by mapping !nonnull metadata to !range
      // metadata by modeling the null pointer constant converted to the
      // integer type.
      // FIXME: Add support for range metadata here. Currently the utilities
      // for this don't propagate range metadata in trivial cases from one
      // integer load to another, don't handle non-addrspace-0 null pointers
      // correctly, and don't have any support for mapping ranges as the
      // integer type becomes wider or narrower.
2519       if (MDNode *N = LI.getMetadata(LLVMContext::MD_nonnull))
2520         copyNonnullMetadata(LI, N, *NewLI);
2521 
      V = NewLI;
2524 
2525       // If this is an integer load past the end of the slice (which means the
2526       // bytes outside the slice are undef or this load is dead) just forcibly
2527       // fix the integer size with correct handling of endianness.
2528       if (auto *AITy = dyn_cast<IntegerType>(NewAllocaTy))
2529         if (auto *TITy = dyn_cast<IntegerType>(TargetTy))
2530           if (AITy->getBitWidth() < TITy->getBitWidth()) {
2531             V = IRB.CreateZExt(V, TITy, "load.ext");
2532             if (DL.isBigEndian())
2533               V = IRB.CreateShl(V, TITy->getBitWidth() - AITy->getBitWidth(),
2534                                 "endian_shift");
2535           }
2536     } else {
2537       Type *LTy = TargetTy->getPointerTo(AS);
2538       LoadInst *NewLI =
2539           IRB.CreateAlignedLoad(TargetTy, getNewAllocaSlicePtr(IRB, LTy),
2540                                 getSliceAlign(), LI.isVolatile(), LI.getName());
2541       if (AATags)
2542         NewLI->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset));
2543       if (LI.isVolatile())
2544         NewLI->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
2545       NewLI->copyMetadata(LI, {LLVMContext::MD_mem_parallel_loop_access,
2546                                LLVMContext::MD_access_group});
2547 
2548       V = NewLI;
2549       IsPtrAdjusted = true;
2550     }
2551     V = convertValue(DL, IRB, V, TargetTy);
2552 
2553     if (IsSplit) {
2554       assert(!LI.isVolatile());
2555       assert(LI.getType()->isIntegerTy() &&
2556              "Only integer type loads and stores are split");
2557       assert(SliceSize < DL.getTypeStoreSize(LI.getType()).getFixedSize() &&
2558              "Split load isn't smaller than original load");
2559       assert(DL.typeSizeEqualsStoreSize(LI.getType()) &&
2560              "Non-byte-multiple bit width");
2561       // Move the insertion point just past the load so that we can refer to it.
2562       IRB.SetInsertPoint(&*std::next(BasicBlock::iterator(&LI)));
2563       // Create a placeholder value with the same type as LI to use as the
2564       // basis for the new value. This allows us to replace the uses of LI with
2565       // the computed value, and then replace the placeholder with LI, leaving
2566       // LI only used for this computation.
2567       Value *Placeholder = new LoadInst(
2568           LI.getType(), PoisonValue::get(LI.getType()->getPointerTo(AS)), "",
2569           false, Align(1));
2570       V = insertInteger(DL, IRB, Placeholder, V, NewBeginOffset - BeginOffset,
2571                         "insert");
2572       LI.replaceAllUsesWith(V);
2573       Placeholder->replaceAllUsesWith(&LI);
2574       Placeholder->deleteValue();
2575     } else {
2576       LI.replaceAllUsesWith(V);
2577     }
2578 
2579     Pass.DeadInsts.push_back(&LI);
2580     deleteIfTriviallyDead(OldOp);
2581     LLVM_DEBUG(dbgs() << "          to: " << *V << "\n");
2582     return !LI.isVolatile() && !IsPtrAdjusted;
2583   }
2584 
2585   bool rewriteVectorizedStoreInst(Value *V, StoreInst &SI, Value *OldOp,
2586                                   AAMDNodes AATags) {
2587     if (V->getType() != VecTy) {
2588       unsigned BeginIndex = getIndex(NewBeginOffset);
2589       unsigned EndIndex = getIndex(NewEndOffset);
2590       assert(EndIndex > BeginIndex && "Empty vector!");
2591       unsigned NumElements = EndIndex - BeginIndex;
2592       assert(NumElements <= cast<FixedVectorType>(VecTy)->getNumElements() &&
2593              "Too many elements!");
2594       Type *SliceTy = (NumElements == 1)
2595                           ? ElementTy
2596                           : FixedVectorType::get(ElementTy, NumElements);
2597       if (V->getType() != SliceTy)
2598         V = convertValue(DL, IRB, V, SliceTy);
2599 
2600       // Mix in the existing elements.
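      // For example (illustrative), storing two floats into a <4 x float>
      // alloca becomes: load the full vector, insert the two elements starting
      // at BeginIndex, and store the full vector back.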
2601       Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
2602                                          NewAI.getAlign(), "load");
2603       V = insertVector(IRB, Old, V, BeginIndex, "vec");
2604     }
2605     StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlign());
2606     Store->copyMetadata(SI, {LLVMContext::MD_mem_parallel_loop_access,
2607                              LLVMContext::MD_access_group});
2608     if (AATags)
2609       Store->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset));
2610     Pass.DeadInsts.push_back(&SI);
2611 
2612     LLVM_DEBUG(dbgs() << "          to: " << *Store << "\n");
2613     return true;
2614   }
2615 
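  // Rewrite a store to a slice of an alloca that is being treated as a single
  // wide integer. Narrower values are merged into the alloca-wide integer via
  // a load of the old value, insertInteger, and a full-width store.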
2616   bool rewriteIntegerStore(Value *V, StoreInst &SI, AAMDNodes AATags) {
2617     assert(IntTy && "We cannot extract an integer from the alloca");
2618     assert(!SI.isVolatile());
2619     if (DL.getTypeSizeInBits(V->getType()).getFixedSize() !=
2620         IntTy->getBitWidth()) {
2621       Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
2622                                          NewAI.getAlign(), "oldload");
2623       Old = convertValue(DL, IRB, Old, IntTy);
2624       assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
2625       uint64_t Offset = BeginOffset - NewAllocaBeginOffset;
2626       V = insertInteger(DL, IRB, Old, SI.getValueOperand(), Offset, "insert");
2627     }
2628     V = convertValue(DL, IRB, V, NewAllocaTy);
2629     StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlign());
2630     Store->copyMetadata(SI, {LLVMContext::MD_mem_parallel_loop_access,
2631                              LLVMContext::MD_access_group});
2632     if (AATags)
2633       Store->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset));
2634     Pass.DeadInsts.push_back(&SI);
2635     LLVM_DEBUG(dbgs() << "          to: " << *Store << "\n");
2636     return true;
2637   }
2638 
2639   bool visitStoreInst(StoreInst &SI) {
2640     LLVM_DEBUG(dbgs() << "    original: " << SI << "\n");
2641     Value *OldOp = SI.getOperand(1);
2642     assert(OldOp == OldPtr);
2643 
2644     AAMDNodes AATags = SI.getAAMetadata();
2645     Value *V = SI.getValueOperand();
2646 
2647     // Strip all inbounds GEPs and pointer casts to try to dig out any root
2648     // alloca that should be re-examined after promoting this alloca.
2649     if (V->getType()->isPointerTy())
2650       if (AllocaInst *AI = dyn_cast<AllocaInst>(V->stripInBoundsOffsets()))
2651         Pass.PostPromotionWorklist.insert(AI);
2652 
2653     if (SliceSize < DL.getTypeStoreSize(V->getType()).getFixedSize()) {
2654       assert(!SI.isVolatile());
2655       assert(V->getType()->isIntegerTy() &&
2656              "Only integer type loads and stores are split");
2657       assert(DL.typeSizeEqualsStoreSize(V->getType()) &&
2658              "Non-byte-multiple bit width");
2659       IntegerType *NarrowTy = Type::getIntNTy(SI.getContext(), SliceSize * 8);
2660       V = extractInteger(DL, IRB, V, NarrowTy, NewBeginOffset - BeginOffset,
2661                          "extract");
2662     }
2663 
2664     if (VecTy)
2665       return rewriteVectorizedStoreInst(V, SI, OldOp, AATags);
2666     if (IntTy && V->getType()->isIntegerTy())
2667       return rewriteIntegerStore(V, SI, AATags);
2668 
2669     const bool IsStorePastEnd =
2670         DL.getTypeStoreSize(V->getType()).getFixedSize() > SliceSize;
2671     StoreInst *NewSI;
2672     if (NewBeginOffset == NewAllocaBeginOffset &&
2673         NewEndOffset == NewAllocaEndOffset &&
2674         (canConvertValue(DL, V->getType(), NewAllocaTy) ||
2675          (IsStorePastEnd && NewAllocaTy->isIntegerTy() &&
2676           V->getType()->isIntegerTy()))) {
      // If this is an integer store past the end of the slice (and thus the
      // bytes past that point are irrelevant or this is unreachable), truncate
      // the value prior to storing.
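      // For example (illustrative), an i32 store into an i16-sized slice keeps
      // only the two low-addressed bytes; on a big-endian target those are the
      // high bits of the value, so shift them down before truncating.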
2680       if (auto *VITy = dyn_cast<IntegerType>(V->getType()))
2681         if (auto *AITy = dyn_cast<IntegerType>(NewAllocaTy))
2682           if (VITy->getBitWidth() > AITy->getBitWidth()) {
2683             if (DL.isBigEndian())
2684               V = IRB.CreateLShr(V, VITy->getBitWidth() - AITy->getBitWidth(),
2685                                  "endian_shift");
2686             V = IRB.CreateTrunc(V, AITy, "load.trunc");
2687           }
2688 
2689       V = convertValue(DL, IRB, V, NewAllocaTy);
2690       NewSI =
2691           IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlign(), SI.isVolatile());
2692     } else {
2693       unsigned AS = SI.getPointerAddressSpace();
2694       Value *NewPtr = getNewAllocaSlicePtr(IRB, V->getType()->getPointerTo(AS));
2695       NewSI =
2696           IRB.CreateAlignedStore(V, NewPtr, getSliceAlign(), SI.isVolatile());
2697     }
2698     NewSI->copyMetadata(SI, {LLVMContext::MD_mem_parallel_loop_access,
2699                              LLVMContext::MD_access_group});
2700     if (AATags)
2701       NewSI->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset));
2702     if (SI.isVolatile())
2703       NewSI->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
2704     if (NewSI->isAtomic())
2705       NewSI->setAlignment(SI.getAlign());
2706     Pass.DeadInsts.push_back(&SI);
2707     deleteIfTriviallyDead(OldOp);
2708 
2709     LLVM_DEBUG(dbgs() << "          to: " << *NewSI << "\n");
2710     return NewSI->getPointerOperand() == &NewAI &&
2711            NewSI->getValueOperand()->getType() == NewAllocaTy &&
2712            !SI.isVolatile();
2713   }
2714 
2715   /// Compute an integer value from splatting an i8 across the given
2716   /// number of bytes.
2717   ///
2718   /// Note that this routine assumes an i8 is a byte. If that isn't true, don't
2719   /// call this routine.
2720   /// FIXME: Heed the advice above.
2721   ///
2722   /// \param V The i8 value to splat.
2723   /// \param Size The number of bytes in the output (assuming i8 is one byte)
2724   Value *getIntegerSplat(Value *V, unsigned Size) {
2725     assert(Size > 0 && "Expected a positive number of bytes.");
2726     IntegerType *VTy = cast<IntegerType>(V->getType());
2727     assert(VTy->getBitWidth() == 8 && "Expected an i8 value for the byte");
2728     if (Size == 1)
2729       return V;
2730 
2731     Type *SplatIntTy = Type::getIntNTy(VTy->getContext(), Size * 8);
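    // Replicate the byte by multiplying its zero-extension with a constant of
    // the form 0x0101...01 (all-ones divided by 0xFF), e.g. for Size == 4:
    // 0x2A * 0x01010101 == 0x2A2A2A2A.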
2732     V = IRB.CreateMul(
2733         IRB.CreateZExt(V, SplatIntTy, "zext"),
2734         ConstantExpr::getUDiv(
2735             Constant::getAllOnesValue(SplatIntTy),
2736             ConstantExpr::getZExt(Constant::getAllOnesValue(V->getType()),
2737                                   SplatIntTy)),
2738         "isplat");
2739     return V;
2740   }
2741 
2742   /// Compute a vector splat for a given element value.
2743   Value *getVectorSplat(Value *V, unsigned NumElements) {
2744     V = IRB.CreateVectorSplat(NumElements, V, "vsplat");
2745     LLVM_DEBUG(dbgs() << "       splat: " << *V << "\n");
2746     return V;
2747   }
2748 
2749   bool visitMemSetInst(MemSetInst &II) {
2750     LLVM_DEBUG(dbgs() << "    original: " << II << "\n");
2751     assert(II.getRawDest() == OldPtr);
2752 
2753     AAMDNodes AATags = II.getAAMetadata();
2754 
2755     // If the memset has a variable size, it cannot be split, just adjust the
2756     // pointer to the new alloca.
2757     if (!isa<ConstantInt>(II.getLength())) {
2758       assert(!IsSplit);
2759       assert(NewBeginOffset == BeginOffset);
2760       II.setDest(getNewAllocaSlicePtr(IRB, OldPtr->getType()));
2761       II.setDestAlignment(getSliceAlign());
2762 
2763       deleteIfTriviallyDead(OldPtr);
2764       return false;
2765     }
2766 
2767     // Record this instruction for deletion.
2768     Pass.DeadInsts.push_back(&II);
2769 
2770     Type *AllocaTy = NewAI.getAllocatedType();
2771     Type *ScalarTy = AllocaTy->getScalarType();
2772 
2773     const bool CanContinue = [&]() {
2774       if (VecTy || IntTy)
2775         return true;
2776       if (BeginOffset > NewAllocaBeginOffset ||
2777           EndOffset < NewAllocaEndOffset)
2778         return false;
2779       // Length must be in range for FixedVectorType.
2780       auto *C = cast<ConstantInt>(II.getLength());
2781       const uint64_t Len = C->getLimitedValue();
2782       if (Len > std::numeric_limits<unsigned>::max())
2783         return false;
2784       auto *Int8Ty = IntegerType::getInt8Ty(NewAI.getContext());
2785       auto *SrcTy = FixedVectorType::get(Int8Ty, Len);
2786       return canConvertValue(DL, SrcTy, AllocaTy) &&
2787              DL.isLegalInteger(DL.getTypeSizeInBits(ScalarTy).getFixedSize());
2788     }();
2789 
2790     // If this doesn't map cleanly onto the alloca type, and that type isn't
2791     // a single value type, just emit a memset.
2792     if (!CanContinue) {
2793       Type *SizeTy = II.getLength()->getType();
2794       Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset);
2795       CallInst *New = IRB.CreateMemSet(
2796           getNewAllocaSlicePtr(IRB, OldPtr->getType()), II.getValue(), Size,
2797           MaybeAlign(getSliceAlign()), II.isVolatile());
2798       if (AATags)
2799         New->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset));
2800       LLVM_DEBUG(dbgs() << "          to: " << *New << "\n");
2801       return false;
2802     }
2803 
2804     // If we can represent this as a simple value, we have to build the actual
2805     // value to store, which requires expanding the byte present in memset to
2806     // a sensible representation for the alloca type. This is essentially
2807     // splatting the byte to a sufficiently wide integer, splatting it across
2808     // any desired vector width, and bitcasting to the final type.
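    // For example (illustrative), a memset of the byte 0x2A over an alloca
    // promoted to i64 becomes a store of the constant 0x2A2A2A2A2A2A2A2A.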
2809     Value *V;
2810 
2811     if (VecTy) {
2812       // If this is a memset of a vectorized alloca, insert it.
2813       assert(ElementTy == ScalarTy);
2814 
2815       unsigned BeginIndex = getIndex(NewBeginOffset);
2816       unsigned EndIndex = getIndex(NewEndOffset);
2817       assert(EndIndex > BeginIndex && "Empty vector!");
2818       unsigned NumElements = EndIndex - BeginIndex;
2819       assert(NumElements <= cast<FixedVectorType>(VecTy)->getNumElements() &&
2820              "Too many elements!");
2821 
2822       Value *Splat = getIntegerSplat(
2823           II.getValue(), DL.getTypeSizeInBits(ElementTy).getFixedSize() / 8);
2824       Splat = convertValue(DL, IRB, Splat, ElementTy);
2825       if (NumElements > 1)
2826         Splat = getVectorSplat(Splat, NumElements);
2827 
2828       Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
2829                                          NewAI.getAlign(), "oldload");
2830       V = insertVector(IRB, Old, Splat, BeginIndex, "vec");
2831     } else if (IntTy) {
2832       // If this is a memset on an alloca where we can widen stores, insert the
2833       // set integer.
2834       assert(!II.isVolatile());
2835 
2836       uint64_t Size = NewEndOffset - NewBeginOffset;
2837       V = getIntegerSplat(II.getValue(), Size);
2838 
      if (IntTy && (BeginOffset != NewAllocaBeginOffset ||
                    EndOffset != NewAllocaEndOffset)) {
2841         Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
2842                                            NewAI.getAlign(), "oldload");
2843         Old = convertValue(DL, IRB, Old, IntTy);
2844         uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
2845         V = insertInteger(DL, IRB, Old, V, Offset, "insert");
2846       } else {
2847         assert(V->getType() == IntTy &&
2848                "Wrong type for an alloca wide integer!");
2849       }
2850       V = convertValue(DL, IRB, V, AllocaTy);
2851     } else {
2852       // Established these invariants above.
2853       assert(NewBeginOffset == NewAllocaBeginOffset);
2854       assert(NewEndOffset == NewAllocaEndOffset);
2855 
2856       V = getIntegerSplat(II.getValue(),
2857                           DL.getTypeSizeInBits(ScalarTy).getFixedSize() / 8);
2858       if (VectorType *AllocaVecTy = dyn_cast<VectorType>(AllocaTy))
2859         V = getVectorSplat(
2860             V, cast<FixedVectorType>(AllocaVecTy)->getNumElements());
2861 
2862       V = convertValue(DL, IRB, V, AllocaTy);
2863     }
2864 
2865     StoreInst *New =
2866         IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlign(), II.isVolatile());
2867     New->copyMetadata(II, {LLVMContext::MD_mem_parallel_loop_access,
2868                            LLVMContext::MD_access_group});
2869     if (AATags)
2870       New->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset));
2871     LLVM_DEBUG(dbgs() << "          to: " << *New << "\n");
2872     return !II.isVolatile();
2873   }
2874 
2875   bool visitMemTransferInst(MemTransferInst &II) {
2876     // Rewriting of memory transfer instructions can be a bit tricky. We break
2877     // them into two categories: split intrinsics and unsplit intrinsics.
2878 
2879     LLVM_DEBUG(dbgs() << "    original: " << II << "\n");
2880 
2881     AAMDNodes AATags = II.getAAMetadata();
2882 
2883     bool IsDest = &II.getRawDestUse() == OldUse;
2884     assert((IsDest && II.getRawDest() == OldPtr) ||
2885            (!IsDest && II.getRawSource() == OldPtr));
2886 
2887     MaybeAlign SliceAlign = getSliceAlign();
2888 
2889     // For unsplit intrinsics, we simply modify the source and destination
2890     // pointers in place. This isn't just an optimization, it is a matter of
2891     // correctness. With unsplit intrinsics we may be dealing with transfers
2892     // within a single alloca before SROA ran, or with transfers that have
2893     // a variable length. We may also be dealing with memmove instead of
    // memcpy, and so simply updating the pointers is all that is necessary for
    // us to update both source and dest of a single call.
2896     if (!IsSplittable) {
2897       Value *AdjustedPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
2898       if (IsDest) {
2899         II.setDest(AdjustedPtr);
2900         II.setDestAlignment(SliceAlign);
      } else {
2903         II.setSource(AdjustedPtr);
2904         II.setSourceAlignment(SliceAlign);
2905       }
2906 
2907       LLVM_DEBUG(dbgs() << "          to: " << II << "\n");
2908       deleteIfTriviallyDead(OldPtr);
2909       return false;
2910     }
2911     // For split transfer intrinsics we have an incredibly useful assurance:
2912     // the source and destination do not reside within the same alloca, and at
2913     // least one of them does not escape. This means that we can replace
2914     // memmove with memcpy, and we don't need to worry about all manner of
2915     // downsides to splitting and transforming the operations.
2916 
2917     // If this doesn't map cleanly onto the alloca type, and that type isn't
2918     // a single value type, just emit a memcpy.
2919     bool EmitMemCpy =
2920         !VecTy && !IntTy &&
2921         (BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset ||
2922          SliceSize !=
2923              DL.getTypeStoreSize(NewAI.getAllocatedType()).getFixedSize() ||
2924          !NewAI.getAllocatedType()->isSingleValueType());
2925 
2926     // If we're just going to emit a memcpy, the alloca hasn't changed, and the
2927     // size hasn't been shrunk based on analysis of the viable range, this is
2928     // a no-op.
2929     if (EmitMemCpy && &OldAI == &NewAI) {
2930       // Ensure the start lines up.
2931       assert(NewBeginOffset == BeginOffset);
2932 
2933       // Rewrite the size as needed.
2934       if (NewEndOffset != EndOffset)
2935         II.setLength(ConstantInt::get(II.getLength()->getType(),
2936                                       NewEndOffset - NewBeginOffset));
2937       return false;
2938     }
2939     // Record this instruction for deletion.
2940     Pass.DeadInsts.push_back(&II);
2941 
2942     // Strip all inbounds GEPs and pointer casts to try to dig out any root
2943     // alloca that should be re-examined after rewriting this instruction.
2944     Value *OtherPtr = IsDest ? II.getRawSource() : II.getRawDest();
2945     if (AllocaInst *AI =
2946             dyn_cast<AllocaInst>(OtherPtr->stripInBoundsOffsets())) {
2947       assert(AI != &OldAI && AI != &NewAI &&
2948              "Splittable transfers cannot reach the same alloca on both ends.");
2949       Pass.Worklist.insert(AI);
2950     }
2951 
2952     Type *OtherPtrTy = OtherPtr->getType();
2953     unsigned OtherAS = OtherPtrTy->getPointerAddressSpace();
2954 
2955     // Compute the relative offset for the other pointer within the transfer.
2956     unsigned OffsetWidth = DL.getIndexSizeInBits(OtherAS);
2957     APInt OtherOffset(OffsetWidth, NewBeginOffset - BeginOffset);
2958     Align OtherAlign =
2959         (IsDest ? II.getSourceAlign() : II.getDestAlign()).valueOrOne();
2960     OtherAlign =
2961         commonAlignment(OtherAlign, OtherOffset.zextOrTrunc(64).getZExtValue());
2962 
2963     if (EmitMemCpy) {
2964       // Compute the other pointer, folding as much as possible to produce
2965       // a single, simple GEP in most cases.
2966       OtherPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy,
2967                                 OtherPtr->getName() + ".");
2968 
2969       Value *OurPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
2970       Type *SizeTy = II.getLength()->getType();
2971       Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset);
2972 
2973       Value *DestPtr, *SrcPtr;
2974       MaybeAlign DestAlign, SrcAlign;
2975       // Note: IsDest is true iff we're copying into the new alloca slice
2976       if (IsDest) {
2977         DestPtr = OurPtr;
2978         DestAlign = SliceAlign;
2979         SrcPtr = OtherPtr;
2980         SrcAlign = OtherAlign;
2981       } else {
2982         DestPtr = OtherPtr;
2983         DestAlign = OtherAlign;
2984         SrcPtr = OurPtr;
2985         SrcAlign = SliceAlign;
2986       }
2987       CallInst *New = IRB.CreateMemCpy(DestPtr, DestAlign, SrcPtr, SrcAlign,
2988                                        Size, II.isVolatile());
2989       if (AATags)
2990         New->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset));
2991       LLVM_DEBUG(dbgs() << "          to: " << *New << "\n");
2992       return false;
2993     }
2994 
2995     bool IsWholeAlloca = NewBeginOffset == NewAllocaBeginOffset &&
2996                          NewEndOffset == NewAllocaEndOffset;
2997     uint64_t Size = NewEndOffset - NewBeginOffset;
2998     unsigned BeginIndex = VecTy ? getIndex(NewBeginOffset) : 0;
2999     unsigned EndIndex = VecTy ? getIndex(NewEndOffset) : 0;
3000     unsigned NumElements = EndIndex - BeginIndex;
3001     IntegerType *SubIntTy =
3002         IntTy ? Type::getIntNTy(IntTy->getContext(), Size * 8) : nullptr;
3003 
3004     // Reset the other pointer type to match the register type we're going to
3005     // use, but using the address space of the original other pointer.
3006     Type *OtherTy;
3007     if (VecTy && !IsWholeAlloca) {
3008       if (NumElements == 1)
3009         OtherTy = VecTy->getElementType();
3010       else
3011         OtherTy = FixedVectorType::get(VecTy->getElementType(), NumElements);
3012     } else if (IntTy && !IsWholeAlloca) {
3013       OtherTy = SubIntTy;
3014     } else {
3015       OtherTy = NewAllocaTy;
3016     }
3017     OtherPtrTy = OtherTy->getPointerTo(OtherAS);
3018 
3019     Value *SrcPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy,
3020                                    OtherPtr->getName() + ".");
3021     MaybeAlign SrcAlign = OtherAlign;
3022     Value *DstPtr = &NewAI;
3023     MaybeAlign DstAlign = SliceAlign;
3024     if (!IsDest) {
3025       std::swap(SrcPtr, DstPtr);
3026       std::swap(SrcAlign, DstAlign);
3027     }
3028 
3029     Value *Src;
3030     if (VecTy && !IsWholeAlloca && !IsDest) {
3031       Src = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
3032                                   NewAI.getAlign(), "load");
3033       Src = extractVector(IRB, Src, BeginIndex, EndIndex, "vec");
3034     } else if (IntTy && !IsWholeAlloca && !IsDest) {
3035       Src = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
3036                                   NewAI.getAlign(), "load");
3037       Src = convertValue(DL, IRB, Src, IntTy);
3038       uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
3039       Src = extractInteger(DL, IRB, Src, SubIntTy, Offset, "extract");
3040     } else {
3041       LoadInst *Load = IRB.CreateAlignedLoad(OtherTy, SrcPtr, SrcAlign,
3042                                              II.isVolatile(), "copyload");
3043       Load->copyMetadata(II, {LLVMContext::MD_mem_parallel_loop_access,
3044                               LLVMContext::MD_access_group});
3045       if (AATags)
3046         Load->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset));
3047       Src = Load;
3048     }
3049 
3050     if (VecTy && !IsWholeAlloca && IsDest) {
3051       Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
3052                                          NewAI.getAlign(), "oldload");
3053       Src = insertVector(IRB, Old, Src, BeginIndex, "vec");
3054     } else if (IntTy && !IsWholeAlloca && IsDest) {
3055       Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
3056                                          NewAI.getAlign(), "oldload");
3057       Old = convertValue(DL, IRB, Old, IntTy);
3058       uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
3059       Src = insertInteger(DL, IRB, Old, Src, Offset, "insert");
3060       Src = convertValue(DL, IRB, Src, NewAllocaTy);
3061     }
3062 
3063     StoreInst *Store = cast<StoreInst>(
3064         IRB.CreateAlignedStore(Src, DstPtr, DstAlign, II.isVolatile()));
3065     Store->copyMetadata(II, {LLVMContext::MD_mem_parallel_loop_access,
3066                              LLVMContext::MD_access_group});
3067     if (AATags)
3068       Store->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset));
3069     LLVM_DEBUG(dbgs() << "          to: " << *Store << "\n");
3070     return !II.isVolatile();
3071   }
3072 
3073   bool visitIntrinsicInst(IntrinsicInst &II) {
3074     assert((II.isLifetimeStartOrEnd() || II.isDroppable()) &&
3075            "Unexpected intrinsic!");
3076     LLVM_DEBUG(dbgs() << "    original: " << II << "\n");
3077 
3078     // Record this instruction for deletion.
3079     Pass.DeadInsts.push_back(&II);
3080 
3081     if (II.isDroppable()) {
3082       assert(II.getIntrinsicID() == Intrinsic::assume && "Expected assume");
3083       // TODO For now we forget assumed information, this can be improved.
3084       OldPtr->dropDroppableUsesIn(II);
3085       return true;
3086     }
3087 
3088     assert(II.getArgOperand(1) == OldPtr);
3089     // Lifetime intrinsics are only promotable if they cover the whole alloca.
3090     // Therefore, we drop lifetime intrinsics which don't cover the whole
3091     // alloca.
3092     // (In theory, intrinsics which partially cover an alloca could be
3093     // promoted, but PromoteMemToReg doesn't handle that case.)
3094     // FIXME: Check whether the alloca is promotable before dropping the
3095     // lifetime intrinsics?
3096     if (NewBeginOffset != NewAllocaBeginOffset ||
3097         NewEndOffset != NewAllocaEndOffset)
3098       return true;
3099 
3100     ConstantInt *Size =
3101         ConstantInt::get(cast<IntegerType>(II.getArgOperand(0)->getType()),
3102                          NewEndOffset - NewBeginOffset);
3103     // Lifetime intrinsics always expect an i8* so directly get such a pointer
3104     // for the new alloca slice.
    Type *PointerTy =
        IRB.getInt8PtrTy(OldPtr->getType()->getPointerAddressSpace());
3106     Value *Ptr = getNewAllocaSlicePtr(IRB, PointerTy);
3107     Value *New;
3108     if (II.getIntrinsicID() == Intrinsic::lifetime_start)
3109       New = IRB.CreateLifetimeStart(Ptr, Size);
3110     else
3111       New = IRB.CreateLifetimeEnd(Ptr, Size);
3112 
3113     (void)New;
3114     LLVM_DEBUG(dbgs() << "          to: " << *New << "\n");
3115 
3116     return true;
3117   }
3118 
3119   void fixLoadStoreAlign(Instruction &Root) {
3120     // This algorithm implements the same visitor loop as
3121     // hasUnsafePHIOrSelectUse, and fixes the alignment of each load
3122     // or store found.
3123     SmallPtrSet<Instruction *, 4> Visited;
3124     SmallVector<Instruction *, 4> Uses;
3125     Visited.insert(&Root);
3126     Uses.push_back(&Root);
3127     do {
3128       Instruction *I = Uses.pop_back_val();
3129 
3130       if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
3131         LI->setAlignment(std::min(LI->getAlign(), getSliceAlign()));
3132         continue;
3133       }
3134       if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
3135         SI->setAlignment(std::min(SI->getAlign(), getSliceAlign()));
3136         continue;
3137       }
3138 
3139       assert(isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I) ||
3140              isa<PHINode>(I) || isa<SelectInst>(I) ||
3141              isa<GetElementPtrInst>(I));
3142       for (User *U : I->users())
3143         if (Visited.insert(cast<Instruction>(U)).second)
3144           Uses.push_back(cast<Instruction>(U));
3145     } while (!Uses.empty());
3146   }
3147 
3148   bool visitPHINode(PHINode &PN) {
3149     LLVM_DEBUG(dbgs() << "    original: " << PN << "\n");
3150     assert(BeginOffset >= NewAllocaBeginOffset && "PHIs are unsplittable");
3151     assert(EndOffset <= NewAllocaEndOffset && "PHIs are unsplittable");
3152 
3153     // We would like to compute a new pointer in only one place, but have it be
3154     // as local as possible to the PHI. To do that, we re-use the location of
3155     // the old pointer, which necessarily must be in the right position to
3156     // dominate the PHI.
3157     IRBuilderBase::InsertPointGuard Guard(IRB);
3158     if (isa<PHINode>(OldPtr))
3159       IRB.SetInsertPoint(&*OldPtr->getParent()->getFirstInsertionPt());
3160     else
3161       IRB.SetInsertPoint(OldPtr);
3162     IRB.SetCurrentDebugLocation(OldPtr->getDebugLoc());
3163 
3164     Value *NewPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
3165     // Replace the operands which were using the old pointer.
3166     std::replace(PN.op_begin(), PN.op_end(), cast<Value>(OldPtr), NewPtr);
3167 
3168     LLVM_DEBUG(dbgs() << "          to: " << PN << "\n");
3169     deleteIfTriviallyDead(OldPtr);
3170 
3171     // Fix the alignment of any loads or stores using this PHI node.
3172     fixLoadStoreAlign(PN);
3173 
3174     // PHIs can't be promoted on their own, but often can be speculated. We
3175     // check the speculation outside of the rewriter so that we see the
3176     // fully-rewritten alloca.
3177     PHIUsers.insert(&PN);
3178     return true;
3179   }
3180 
3181   bool visitSelectInst(SelectInst &SI) {
3182     LLVM_DEBUG(dbgs() << "    original: " << SI << "\n");
3183     assert((SI.getTrueValue() == OldPtr || SI.getFalseValue() == OldPtr) &&
3184            "Pointer isn't an operand!");
3185     assert(BeginOffset >= NewAllocaBeginOffset && "Selects are unsplittable");
3186     assert(EndOffset <= NewAllocaEndOffset && "Selects are unsplittable");
3187 
3188     Value *NewPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
3189     // Replace the operands which were using the old pointer.
3190     if (SI.getOperand(1) == OldPtr)
3191       SI.setOperand(1, NewPtr);
3192     if (SI.getOperand(2) == OldPtr)
3193       SI.setOperand(2, NewPtr);
3194 
3195     LLVM_DEBUG(dbgs() << "          to: " << SI << "\n");
3196     deleteIfTriviallyDead(OldPtr);
3197 
3198     // Fix the alignment of any loads or stores using this select.
3199     fixLoadStoreAlign(SI);
3200 
3201     // Selects can't be promoted on their own, but often can be speculated. We
3202     // check the speculation outside of the rewriter so that we see the
3203     // fully-rewritten alloca.
3204     SelectUsers.insert(&SI);
3205     return true;
3206   }
3207 };
3208 
3209 namespace {
3210 
3211 /// Visitor to rewrite aggregate loads and stores as scalar.
3212 ///
3213 /// This pass aggressively rewrites all aggregate loads and stores on
3214 /// a particular pointer (or any pointer derived from it which we can identify)
3215 /// with scalar loads and stores.
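///
/// For example, a load of a first-class aggregate of type { i32, float } is
/// rewritten as one scalar load per member, recombined with insertvalue;
/// aggregate stores are handled symmetrically via extractvalue.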
3216 class AggLoadStoreRewriter : public InstVisitor<AggLoadStoreRewriter, bool> {
3217   // Befriend the base class so it can delegate to private visit methods.
3218   friend class InstVisitor<AggLoadStoreRewriter, bool>;
3219 
3220   /// Queue of pointer uses to analyze and potentially rewrite.
3221   SmallVector<Use *, 8> Queue;
3222 
3223   /// Set to prevent us from cycling with phi nodes and loops.
3224   SmallPtrSet<User *, 8> Visited;
3225 
3226   /// The current pointer use being rewritten. This is used to dig up the used
3227   /// value (as opposed to the user).
3228   Use *U = nullptr;
3229 
3230   /// Used to calculate offsets, and hence alignment, of subobjects.
3231   const DataLayout &DL;
3232 
3233   IRBuilderTy &IRB;
3234 
3235 public:
3236   AggLoadStoreRewriter(const DataLayout &DL, IRBuilderTy &IRB)
3237       : DL(DL), IRB(IRB) {}
3238 
3239   /// Rewrite loads and stores through a pointer and all pointers derived from
3240   /// it.
3241   bool rewrite(Instruction &I) {
3242     LLVM_DEBUG(dbgs() << "  Rewriting FCA loads and stores...\n");
3243     enqueueUsers(I);
3244     bool Changed = false;
3245     while (!Queue.empty()) {
3246       U = Queue.pop_back_val();
3247       Changed |= visit(cast<Instruction>(U->getUser()));
3248     }
3249     return Changed;
3250   }
3251 
3252 private:
3253   /// Enqueue all the users of the given instruction for further processing.
3254   /// This uses a set to de-duplicate users.
3255   void enqueueUsers(Instruction &I) {
3256     for (Use &U : I.uses())
3257       if (Visited.insert(U.getUser()).second)
3258         Queue.push_back(&U);
3259   }
3260 
3261   // Conservative default is to not rewrite anything.
3262   bool visitInstruction(Instruction &I) { return false; }
3263 
3264   /// Generic recursive split emission class.
3265   template <typename Derived> class OpSplitter {
3266   protected:
3267     /// The builder used to form new instructions.
3268     IRBuilderTy &IRB;
3269 
    /// The indices to be used with insert- or extractvalue to select the
    /// appropriate value within the aggregate.
3272     SmallVector<unsigned, 4> Indices;
3273 
3274     /// The indices to a GEP instruction which will move Ptr to the correct slot
3275     /// within the aggregate.
3276     SmallVector<Value *, 4> GEPIndices;
3277 
3278     /// The base pointer of the original op, used as a base for GEPing the
3279     /// split operations.
3280     Value *Ptr;
3281 
3282     /// The base pointee type being GEPed into.
3283     Type *BaseTy;
3284 
3285     /// Known alignment of the base pointer.
3286     Align BaseAlign;
3287 
    /// Used to calculate the offset of each component so we can correctly
    /// deduce alignments.
3290     const DataLayout &DL;
3291 
3292     /// Initialize the splitter with an insertion point, Ptr and start with a
3293     /// single zero GEP index.
3294     OpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy,
3295                Align BaseAlign, const DataLayout &DL, IRBuilderTy &IRB)
3296         : IRB(IRB), GEPIndices(1, IRB.getInt32(0)), Ptr(Ptr), BaseTy(BaseTy),
3297           BaseAlign(BaseAlign), DL(DL) {
3298       IRB.SetInsertPoint(InsertionPoint);
3299     }
3300 
3301   public:
3302     /// Generic recursive split emission routine.
3303     ///
3304     /// This method recursively splits an aggregate op (load or store) into
3305     /// scalar or vector ops. It splits recursively until it hits a single value
3306     /// and emits that single value operation via the template argument.
3307     ///
3308     /// The logic of this routine relies on GEPs and insertvalue and
3309     /// extractvalue all operating with the same fundamental index list, merely
3310     /// formatted differently (GEPs need actual values).
3311     ///
3312     /// \param Ty  The type being split recursively into smaller ops.
3313     /// \param Agg The aggregate value being built up or stored, depending on
3314     /// whether this is splitting a load or a store respectively.
3315     void emitSplitOps(Type *Ty, Value *&Agg, const Twine &Name) {
3316       if (Ty->isSingleValueType()) {
3317         unsigned Offset = DL.getIndexedOffsetInType(BaseTy, GEPIndices);
3318         return static_cast<Derived *>(this)->emitFunc(
3319             Ty, Agg, commonAlignment(BaseAlign, Offset), Name);
3320       }
3321 
3322       if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
3323         unsigned OldSize = Indices.size();
3324         (void)OldSize;
3325         for (unsigned Idx = 0, Size = ATy->getNumElements(); Idx != Size;
3326              ++Idx) {
3327           assert(Indices.size() == OldSize && "Did not return to the old size");
3328           Indices.push_back(Idx);
3329           GEPIndices.push_back(IRB.getInt32(Idx));
3330           emitSplitOps(ATy->getElementType(), Agg, Name + "." + Twine(Idx));
3331           GEPIndices.pop_back();
3332           Indices.pop_back();
3333         }
3334         return;
3335       }
3336 
3337       if (StructType *STy = dyn_cast<StructType>(Ty)) {
3338         unsigned OldSize = Indices.size();
3339         (void)OldSize;
3340         for (unsigned Idx = 0, Size = STy->getNumElements(); Idx != Size;
3341              ++Idx) {
3342           assert(Indices.size() == OldSize && "Did not return to the old size");
3343           Indices.push_back(Idx);
3344           GEPIndices.push_back(IRB.getInt32(Idx));
3345           emitSplitOps(STy->getElementType(Idx), Agg, Name + "." + Twine(Idx));
3346           GEPIndices.pop_back();
3347           Indices.pop_back();
3348         }
3349         return;
3350       }
3351 
3352       llvm_unreachable("Only arrays and structs are aggregate loadable types");
3353     }
3354   };
3355 
3356   struct LoadOpSplitter : public OpSplitter<LoadOpSplitter> {
3357     AAMDNodes AATags;
3358 
3359     LoadOpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy,
3360                    AAMDNodes AATags, Align BaseAlign, const DataLayout &DL,
3361                    IRBuilderTy &IRB)
3362         : OpSplitter<LoadOpSplitter>(InsertionPoint, Ptr, BaseTy, BaseAlign, DL,
3363                                      IRB),
3364           AATags(AATags) {}
3365 
3366     /// Emit a leaf load of a single value. This is called at the leaves of the
3367     /// recursive emission to actually load values.
3368     void emitFunc(Type *Ty, Value *&Agg, Align Alignment, const Twine &Name) {
3369       assert(Ty->isSingleValueType());
3370       // Load the single value and insert it using the indices.
3371       Value *GEP =
3372           IRB.CreateInBoundsGEP(BaseTy, Ptr, GEPIndices, Name + ".gep");
3373       LoadInst *Load =
3374           IRB.CreateAlignedLoad(Ty, GEP, Alignment, Name + ".load");
3375 
3376       APInt Offset(
3377           DL.getIndexSizeInBits(Ptr->getType()->getPointerAddressSpace()), 0);
3378       if (AATags &&
3379           GEPOperator::accumulateConstantOffset(BaseTy, GEPIndices, DL, Offset))
3380         Load->setAAMetadata(AATags.shift(Offset.getZExtValue()));
3381 
3382       Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert");
3383       LLVM_DEBUG(dbgs() << "          to: " << *Load << "\n");
3384     }
3385   };
3386 
3387   bool visitLoadInst(LoadInst &LI) {
3388     assert(LI.getPointerOperand() == *U);
3389     if (!LI.isSimple() || LI.getType()->isSingleValueType())
3390       return false;
3391 
3392     // We have an aggregate being loaded, split it apart.
3393     LLVM_DEBUG(dbgs() << "    original: " << LI << "\n");
3394     LoadOpSplitter Splitter(&LI, *U, LI.getType(), LI.getAAMetadata(),
3395                             getAdjustedAlignment(&LI, 0), DL, IRB);
3396     Value *V = PoisonValue::get(LI.getType());
3397     Splitter.emitSplitOps(LI.getType(), V, LI.getName() + ".fca");
3398     Visited.erase(&LI);
3399     LI.replaceAllUsesWith(V);
3400     LI.eraseFromParent();
3401     return true;
3402   }
3403 
3404   struct StoreOpSplitter : public OpSplitter<StoreOpSplitter> {
3405     StoreOpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy,
3406                     AAMDNodes AATags, Align BaseAlign, const DataLayout &DL,
3407                     IRBuilderTy &IRB)
3408         : OpSplitter<StoreOpSplitter>(InsertionPoint, Ptr, BaseTy, BaseAlign,
3409                                       DL, IRB),
3410           AATags(AATags) {}
3411     AAMDNodes AATags;
3412     /// Emit a leaf store of a single value. This is called at the leaves of the
3413     /// recursive emission to actually produce stores.
3414     void emitFunc(Type *Ty, Value *&Agg, Align Alignment, const Twine &Name) {
3415       assert(Ty->isSingleValueType());
3416       // Extract the single value and store it using the indices.
3417       //
3418       // The gep and extractvalue values are factored out of the CreateStore
3419       // call to make the output independent of the argument evaluation order.
3420       Value *ExtractValue =
3421           IRB.CreateExtractValue(Agg, Indices, Name + ".extract");
3422       Value *InBoundsGEP =
3423           IRB.CreateInBoundsGEP(BaseTy, Ptr, GEPIndices, Name + ".gep");
3424       StoreInst *Store =
3425           IRB.CreateAlignedStore(ExtractValue, InBoundsGEP, Alignment);
3426 
3427       APInt Offset(
3428           DL.getIndexSizeInBits(Ptr->getType()->getPointerAddressSpace()), 0);
3429       if (AATags &&
3430           GEPOperator::accumulateConstantOffset(BaseTy, GEPIndices, DL, Offset))
3431         Store->setAAMetadata(AATags.shift(Offset.getZExtValue()));
3432 
3433       LLVM_DEBUG(dbgs() << "          to: " << *Store << "\n");
3434     }
3435   };
3436 
3437   bool visitStoreInst(StoreInst &SI) {
3438     if (!SI.isSimple() || SI.getPointerOperand() != *U)
3439       return false;
3440     Value *V = SI.getValueOperand();
3441     if (V->getType()->isSingleValueType())
3442       return false;
3443 
3444     // We have an aggregate being stored, split it apart.
3445     LLVM_DEBUG(dbgs() << "    original: " << SI << "\n");
3446     StoreOpSplitter Splitter(&SI, *U, V->getType(), SI.getAAMetadata(),
3447                              getAdjustedAlignment(&SI, 0), DL, IRB);
3448     Splitter.emitSplitOps(V->getType(), V, V->getName() + ".fca");
3449     Visited.erase(&SI);
3450     SI.eraseFromParent();
3451     return true;
3452   }
3453 
3454   bool visitBitCastInst(BitCastInst &BC) {
3455     enqueueUsers(BC);
3456     return false;
3457   }
3458 
3459   bool visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
3460     enqueueUsers(ASC);
3461     return false;
3462   }
3463 
3464   // Fold gep (select cond, ptr1, ptr2) => select cond, gep(ptr1), gep(ptr2)
3465   bool foldGEPSelect(GetElementPtrInst &GEPI) {
3466     if (!GEPI.hasAllConstantIndices())
3467       return false;
3468 
3469     SelectInst *Sel = cast<SelectInst>(GEPI.getPointerOperand());
3470 
3471     LLVM_DEBUG(dbgs() << "  Rewriting gep(select) -> select(gep):"
3472                       << "\n    original: " << *Sel
3473                       << "\n              " << GEPI);
3474 
3475     IRB.SetInsertPoint(&GEPI);
3476     SmallVector<Value *, 4> Index(GEPI.indices());
3477     bool IsInBounds = GEPI.isInBounds();
3478 
3479     Type *Ty = GEPI.getSourceElementType();
3480     Value *True = Sel->getTrueValue();
3481     Value *NTrue =
3482         IsInBounds
3483             ? IRB.CreateInBoundsGEP(Ty, True, Index,
3484                                     True->getName() + ".sroa.gep")
3485             : IRB.CreateGEP(Ty, True, Index, True->getName() + ".sroa.gep");
3486 
3487     Value *False = Sel->getFalseValue();
3488 
3489     Value *NFalse =
3490         IsInBounds
3491             ? IRB.CreateInBoundsGEP(Ty, False, Index,
3492                                     False->getName() + ".sroa.gep")
3493             : IRB.CreateGEP(Ty, False, Index, False->getName() + ".sroa.gep");
3494 
3495     Value *NSel = IRB.CreateSelect(Sel->getCondition(), NTrue, NFalse,
3496                                    Sel->getName() + ".sroa.sel");
3497     Visited.erase(&GEPI);
3498     GEPI.replaceAllUsesWith(NSel);
3499     GEPI.eraseFromParent();
3500     Instruction *NSelI = cast<Instruction>(NSel);
3501     Visited.insert(NSelI);
3502     enqueueUsers(*NSelI);
3503 
3504     LLVM_DEBUG(dbgs() << "\n          to: " << *NTrue
3505                       << "\n              " << *NFalse
3506                       << "\n              " << *NSel << '\n');
3507 
3508     return true;
3509   }
3510 
3511   // Fold gep (phi ptr1, ptr2) => phi gep(ptr1), gep(ptr2)
3512   bool foldGEPPhi(GetElementPtrInst &GEPI) {
3513     if (!GEPI.hasAllConstantIndices())
3514       return false;
3515 
3516     PHINode *PHI = cast<PHINode>(GEPI.getPointerOperand());
    if (GEPI.getParent() != PHI->getParent() ||
        llvm::any_of(PHI->incoming_values(), [](Value *In) {
          Instruction *I = dyn_cast<Instruction>(In);
          return !I || isa<GetElementPtrInst>(I) || isa<PHINode>(I) ||
                 succ_empty(I->getParent()) ||
                 !I->getParent()->isLegalToHoistInto();
        }))
3524       return false;
3525 
3526     LLVM_DEBUG(dbgs() << "  Rewriting gep(phi) -> phi(gep):"
3527                       << "\n    original: " << *PHI
3528                       << "\n              " << GEPI
3529                       << "\n          to: ");
3530 
3531     SmallVector<Value *, 4> Index(GEPI.indices());
3532     bool IsInBounds = GEPI.isInBounds();
3533     IRB.SetInsertPoint(GEPI.getParent()->getFirstNonPHI());
3534     PHINode *NewPN = IRB.CreatePHI(GEPI.getType(), PHI->getNumIncomingValues(),
3535                                    PHI->getName() + ".sroa.phi");
3536     for (unsigned I = 0, E = PHI->getNumIncomingValues(); I != E; ++I) {
3537       BasicBlock *B = PHI->getIncomingBlock(I);
3538       Value *NewVal = nullptr;
3539       int Idx = NewPN->getBasicBlockIndex(B);
3540       if (Idx >= 0) {
3541         NewVal = NewPN->getIncomingValue(Idx);
3542       } else {
3543         Instruction *In = cast<Instruction>(PHI->getIncomingValue(I));
3544 
3545         IRB.SetInsertPoint(In->getParent(), std::next(In->getIterator()));
3546         Type *Ty = GEPI.getSourceElementType();
3547         NewVal = IsInBounds ? IRB.CreateInBoundsGEP(Ty, In, Index,
3548                                                     In->getName() + ".sroa.gep")
3549                             : IRB.CreateGEP(Ty, In, Index,
3550                                             In->getName() + ".sroa.gep");
3551       }
3552       NewPN->addIncoming(NewVal, B);
3553     }
3554 
3555     Visited.erase(&GEPI);
3556     GEPI.replaceAllUsesWith(NewPN);
3557     GEPI.eraseFromParent();
3558     Visited.insert(NewPN);
3559     enqueueUsers(*NewPN);
3560 
3561     LLVM_DEBUG(for (Value *In : NewPN->incoming_values())
3562                  dbgs() << "\n              " << *In;
3563                dbgs() << "\n              " << *NewPN << '\n');
3564 
3565     return true;
3566   }
3567 
3568   bool visitGetElementPtrInst(GetElementPtrInst &GEPI) {
3569     if (isa<SelectInst>(GEPI.getPointerOperand()) &&
3570         foldGEPSelect(GEPI))
3571       return true;
3572 
3573     if (isa<PHINode>(GEPI.getPointerOperand()) &&
3574         foldGEPPhi(GEPI))
3575       return true;
3576 
3577     enqueueUsers(GEPI);
3578     return false;
3579   }
3580 
3581   bool visitPHINode(PHINode &PN) {
3582     enqueueUsers(PN);
3583     return false;
3584   }
3585 
3586   bool visitSelectInst(SelectInst &SI) {
3587     enqueueUsers(SI);
3588     return false;
3589   }
3590 };
3591 
3592 } // end anonymous namespace
3593 
3594 /// Strip aggregate type wrapping.
3595 ///
3596 /// This removes no-op aggregate types wrapping an underlying type. It will
3597 /// strip as many layers of types as it can without changing either the type
3598 /// size or the allocated size.
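///
/// For example, a type such as { [1 x i32] }, whose allocation size and bit
/// size both match i32, is stripped down to i32.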
3599 static Type *stripAggregateTypeWrapping(const DataLayout &DL, Type *Ty) {
3600   if (Ty->isSingleValueType())
3601     return Ty;
3602 
3603   uint64_t AllocSize = DL.getTypeAllocSize(Ty).getFixedSize();
3604   uint64_t TypeSize = DL.getTypeSizeInBits(Ty).getFixedSize();
3605 
3606   Type *InnerTy;
3607   if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) {
3608     InnerTy = ArrTy->getElementType();
3609   } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
3610     const StructLayout *SL = DL.getStructLayout(STy);
3611     unsigned Index = SL->getElementContainingOffset(0);
3612     InnerTy = STy->getElementType(Index);
3613   } else {
3614     return Ty;
3615   }
3616 
3617   if (AllocSize > DL.getTypeAllocSize(InnerTy).getFixedSize() ||
3618       TypeSize > DL.getTypeSizeInBits(InnerTy).getFixedSize())
3619     return Ty;
3620 
3621   return stripAggregateTypeWrapping(DL, InnerTy);
3622 }
3623 
3624 /// Try to find a partition of the aggregate type passed in for a given
3625 /// offset and size.
3626 ///
3627 /// This recurses through the aggregate type and tries to compute a subtype
3628 /// based on the offset and size. When the offset and size span a sub-section
3629 /// of an array, it will even compute a new array type for that sub-section,
3630 /// and the same for structs.
3631 ///
3632 /// Note that this routine is very strict and tries to find a partition of the
3633 /// type which produces the *exact* right offset and size. It is not forgiving
3634 /// when the size or offset cause either end of type-based partition to be off.
3635 /// Also, this is a best-effort routine. It is reasonable to give up and not
3636 /// return a type if necessary.
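///
/// For example, given { i32, i32, i32 }, an offset of 4 with a size of 8
/// yields the sub-struct { i32, i32 }, while an offset of 2 with a size of 4
/// straddles an element boundary and yields no type.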
3637 static Type *getTypePartition(const DataLayout &DL, Type *Ty, uint64_t Offset,
3638                               uint64_t Size) {
3639   if (Offset == 0 && DL.getTypeAllocSize(Ty).getFixedSize() == Size)
3640     return stripAggregateTypeWrapping(DL, Ty);
3641   if (Offset > DL.getTypeAllocSize(Ty).getFixedSize() ||
3642       (DL.getTypeAllocSize(Ty).getFixedSize() - Offset) < Size)
3643     return nullptr;
3644 
  if (isa<ArrayType>(Ty) || isa<VectorType>(Ty)) {
    Type *ElementTy;
    uint64_t TyNumElements;
    if (auto *AT = dyn_cast<ArrayType>(Ty)) {
      ElementTy = AT->getElementType();
      TyNumElements = AT->getNumElements();
    } else {
      // FIXME: This isn't right for vectors with non-byte-sized or
      // non-power-of-two sized elements.
      auto *VT = cast<FixedVectorType>(Ty);
      ElementTy = VT->getElementType();
      TyNumElements = VT->getNumElements();
    }
3658     uint64_t ElementSize = DL.getTypeAllocSize(ElementTy).getFixedSize();
3659     uint64_t NumSkippedElements = Offset / ElementSize;
3660     if (NumSkippedElements >= TyNumElements)
3661       return nullptr;
3662     Offset -= NumSkippedElements * ElementSize;
3663 
3664     // First check if we need to recurse.
3665     if (Offset > 0 || Size < ElementSize) {
3666       // Bail if the partition ends in a different array element.
3667       if ((Offset + Size) > ElementSize)
3668         return nullptr;
3669       // Recurse through the element type trying to peel off offset bytes.
3670       return getTypePartition(DL, ElementTy, Offset, Size);
3671     }
3672     assert(Offset == 0);
3673 
3674     if (Size == ElementSize)
3675       return stripAggregateTypeWrapping(DL, ElementTy);
3676     assert(Size > ElementSize);
3677     uint64_t NumElements = Size / ElementSize;
3678     if (NumElements * ElementSize != Size)
3679       return nullptr;
3680     return ArrayType::get(ElementTy, NumElements);
3681   }
3682 
3683   StructType *STy = dyn_cast<StructType>(Ty);
3684   if (!STy)
3685     return nullptr;
3686 
3687   const StructLayout *SL = DL.getStructLayout(STy);
3688   if (Offset >= SL->getSizeInBytes())
3689     return nullptr;
3690   uint64_t EndOffset = Offset + Size;
3691   if (EndOffset > SL->getSizeInBytes())
3692     return nullptr;
3693 
3694   unsigned Index = SL->getElementContainingOffset(Offset);
3695   Offset -= SL->getElementOffset(Index);
3696 
3697   Type *ElementTy = STy->getElementType(Index);
3698   uint64_t ElementSize = DL.getTypeAllocSize(ElementTy).getFixedSize();
3699   if (Offset >= ElementSize)
3700     return nullptr; // The offset points into alignment padding.
3701 
3702   // See if any partition must be contained by the element.
3703   if (Offset > 0 || Size < ElementSize) {
3704     if ((Offset + Size) > ElementSize)
3705       return nullptr;
3706     return getTypePartition(DL, ElementTy, Offset, Size);
3707   }
3708   assert(Offset == 0);
3709 
3710   if (Size == ElementSize)
3711     return stripAggregateTypeWrapping(DL, ElementTy);
3712 
3713   StructType::element_iterator EI = STy->element_begin() + Index,
3714                                EE = STy->element_end();
3715   if (EndOffset < SL->getSizeInBytes()) {
3716     unsigned EndIndex = SL->getElementContainingOffset(EndOffset);
3717     if (Index == EndIndex)
3718       return nullptr; // Within a single element and its padding.
3719 
3720     // Don't try to form "natural" types if the elements don't line up with the
3721     // expected size.
3722     // FIXME: We could potentially recurse down through the last element in the
3723     // sub-struct to find a natural end point.
3724     if (SL->getElementOffset(EndIndex) != EndOffset)
3725       return nullptr;
3726 
3727     assert(Index < EndIndex);
3728     EE = STy->element_begin() + EndIndex;
3729   }
3730 
3731   // Try to build up a sub-structure.
3732   StructType *SubTy =
3733       StructType::get(STy->getContext(), makeArrayRef(EI, EE), STy->isPacked());
3734   const StructLayout *SubSL = DL.getStructLayout(SubTy);
3735   if (Size != SubSL->getSizeInBytes())
3736     return nullptr; // The sub-struct doesn't have quite the size needed.
3737 
3738   return SubTy;
3739 }
3740 
3741 /// Pre-split loads and stores to simplify rewriting.
3742 ///
3743 /// We want to break up the splittable load+store pairs as much as
3744 /// possible. This is important to do as a preprocessing step, as once we
3745 /// start rewriting the accesses to partitions of the alloca we lose the
3746 /// necessary information to correctly split apart paired loads and stores
3747 /// which both point into this alloca. The case to consider is something like
3748 /// the following:
3749 ///
3750 ///   %a = alloca [12 x i8]
3751 ///   %gep1 = getelementptr [12 x i8]* %a, i32 0, i32 0
3752 ///   %gep2 = getelementptr [12 x i8]* %a, i32 0, i32 4
3753 ///   %gep3 = getelementptr [12 x i8]* %a, i32 0, i32 8
3754 ///   %iptr1 = bitcast i8* %gep1 to i64*
3755 ///   %iptr2 = bitcast i8* %gep2 to i64*
3756 ///   %fptr1 = bitcast i8* %gep1 to float*
3757 ///   %fptr2 = bitcast i8* %gep2 to float*
3758 ///   %fptr3 = bitcast i8* %gep3 to float*
3759 ///   store float 0.0, float* %fptr1
3760 ///   store float 1.0, float* %fptr2
3761 ///   %v = load i64* %iptr1
3762 ///   store i64 %v, i64* %iptr2
3763 ///   %f1 = load float* %fptr2
3764 ///   %f2 = load float* %fptr3
3765 ///
3766 /// Here we want to form 3 partitions of the alloca, each 4 bytes large, and
3767 /// promote everything so we recover the 2 SSA values that should have been
3768 /// there all along.
3769 ///
3770 /// \returns true if any changes are made.
3771 bool SROAPass::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
3772   LLVM_DEBUG(dbgs() << "Pre-splitting loads and stores\n");
3773 
3774   // Track the loads and stores which are candidates for pre-splitting here, in
3775   // the order they first appear during the partition scan. These give stable
3776   // iteration order and a basis for tracking which loads and stores we
3777   // actually split.
3778   SmallVector<LoadInst *, 4> Loads;
3779   SmallVector<StoreInst *, 4> Stores;
3780 
3781   // We need to accumulate the splits required of each load or store where we
3782   // can find them via a direct lookup. This is important to cross-check loads
3783   // and stores against each other. We also track the slice so that we can kill
3784   // all the slices that end up split.
3785   struct SplitOffsets {
3786     Slice *S;
3787     std::vector<uint64_t> Splits;
3788   };
3789   SmallDenseMap<Instruction *, SplitOffsets, 8> SplitOffsetsMap;
3790 
3791   // Track loads out of this alloca which cannot, for any reason, be pre-split.
3792   // This is important as we also cannot pre-split stores of those loads!
3793   // FIXME: This is all pretty gross. It means that we can be more aggressive
3794   // in pre-splitting when the load feeding the store happens to come from
3795   // a separate alloca. Put another way, the effectiveness of SROA would be
3796   // decreased by a frontend which just concatenated all of its local allocas
3797   // into one big flat alloca. But defeating such patterns is exactly the job
  // SROA is tasked with! Sadly, to not have this discrepancy we would have to
  // change store pre-splitting to actually force pre-splitting of the load
3800   // that feeds it *and all stores*. That makes pre-splitting much harder, but
3801   // maybe it would make it more principled?
3802   SmallPtrSet<LoadInst *, 8> UnsplittableLoads;
3803 
3804   LLVM_DEBUG(dbgs() << "  Searching for candidate loads and stores\n");
3805   for (auto &P : AS.partitions()) {
3806     for (Slice &S : P) {
3807       Instruction *I = cast<Instruction>(S.getUse()->getUser());
3808       if (!S.isSplittable() || S.endOffset() <= P.endOffset()) {
3809         // If this is a load we have to track that it can't participate in any
3810         // pre-splitting. If this is a store of a load we have to track that
3811         // that load also can't participate in any pre-splitting.
3812         if (auto *LI = dyn_cast<LoadInst>(I))
3813           UnsplittableLoads.insert(LI);
3814         else if (auto *SI = dyn_cast<StoreInst>(I))
3815           if (auto *LI = dyn_cast<LoadInst>(SI->getValueOperand()))
3816             UnsplittableLoads.insert(LI);
3817         continue;
3818       }
3819       assert(P.endOffset() > S.beginOffset() &&
3820              "Empty or backwards partition!");
3821 
3822       // Determine if this is a pre-splittable slice.
3823       if (auto *LI = dyn_cast<LoadInst>(I)) {
3824         assert(!LI->isVolatile() && "Cannot split volatile loads!");
3825 
3826         // The load must be used exclusively to store into other pointers for
3827         // us to be able to arbitrarily pre-split it. The stores must also be
3828         // simple to avoid changing semantics.
3829         auto IsLoadSimplyStored = [](LoadInst *LI) {
3830           for (User *LU : LI->users()) {
3831             auto *SI = dyn_cast<StoreInst>(LU);
3832             if (!SI || !SI->isSimple())
3833               return false;
3834           }
3835           return true;
3836         };
3837         if (!IsLoadSimplyStored(LI)) {
3838           UnsplittableLoads.insert(LI);
3839           continue;
3840         }
3841 
3842         Loads.push_back(LI);
3843       } else if (auto *SI = dyn_cast<StoreInst>(I)) {
3844         if (S.getUse() != &SI->getOperandUse(SI->getPointerOperandIndex()))
3845           // Skip stores *of* pointers. FIXME: This shouldn't even be possible!
3846           continue;
3847         auto *StoredLoad = dyn_cast<LoadInst>(SI->getValueOperand());
3848         if (!StoredLoad || !StoredLoad->isSimple())
3849           continue;
3850         assert(!SI->isVolatile() && "Cannot split volatile stores!");
3851 
3852         Stores.push_back(SI);
3853       } else {
3854         // Other uses cannot be pre-split.
3855         continue;
3856       }
3857 
3858       // Record the initial split.
3859       LLVM_DEBUG(dbgs() << "    Candidate: " << *I << "\n");
3860       auto &Offsets = SplitOffsetsMap[I];
3861       assert(Offsets.Splits.empty() &&
3862              "Should not have splits the first time we see an instruction!");
3863       Offsets.S = &S;
3864       Offsets.Splits.push_back(P.endOffset() - S.beginOffset());
3865     }
3866 
3867     // Now scan the already split slices, and add a split for any of them which
3868     // we're going to pre-split.
3869     for (Slice *S : P.splitSliceTails()) {
3870       auto SplitOffsetsMapI =
3871           SplitOffsetsMap.find(cast<Instruction>(S->getUse()->getUser()));
3872       if (SplitOffsetsMapI == SplitOffsetsMap.end())
3873         continue;
3874       auto &Offsets = SplitOffsetsMapI->second;
3875 
3876       assert(Offsets.S == S && "Found a mismatched slice!");
3877       assert(!Offsets.Splits.empty() &&
3878              "Cannot have an empty set of splits on the second partition!");
3879       assert(Offsets.Splits.back() ==
3880                  P.beginOffset() - Offsets.S->beginOffset() &&
3881              "Previous split does not end where this one begins!");
3882 
3883       // Record each split. The last partition's end isn't needed as the size
3884       // of the slice dictates that.
3885       if (S->endOffset() > P.endOffset())
3886         Offsets.Splits.push_back(P.endOffset() - Offsets.S->beginOffset());
3887     }
3888   }
3889 
3890   // We may have split loads where some of their stores are split stores. For
3891   // such loads and stores, we can only pre-split them if their splits exactly
3892   // match relative to their starting offset. We have to verify this prior to
3893   // any rewriting.
3894   llvm::erase_if(Stores, [&UnsplittableLoads, &SplitOffsetsMap](StoreInst *SI) {
3895     // Lookup the load we are storing in our map of split
3896     // offsets.
3897     auto *LI = cast<LoadInst>(SI->getValueOperand());
3898     // If it was completely unsplittable, then we're done,
3899     // and this store can't be pre-split.
3900     if (UnsplittableLoads.count(LI))
3901       return true;
3902 
3903     auto LoadOffsetsI = SplitOffsetsMap.find(LI);
3904     if (LoadOffsetsI == SplitOffsetsMap.end())
3905       return false; // Unrelated loads are definitely safe.
3906     auto &LoadOffsets = LoadOffsetsI->second;
3907 
3908     // Now lookup the store's offsets.
3909     auto &StoreOffsets = SplitOffsetsMap[SI];
3910 
3911     // If the relative offsets of each split in the load and
3912     // store match exactly, then we can split them and we
3913     // don't need to remove them here.
3914     if (LoadOffsets.Splits == StoreOffsets.Splits)
3915       return false;
3916 
3917     LLVM_DEBUG(dbgs() << "    Mismatched splits for load and store:\n"
3918                       << "      " << *LI << "\n"
3919                       << "      " << *SI << "\n");
3920 
3921     // We've found a store and load that we need to split
3922     // with mismatched relative splits. Just give up on them
3923     // and remove both instructions from our list of
3924     // candidates.
3925     UnsplittableLoads.insert(LI);
3926     return true;
3927   });
3928   // Now we have to go *back* through all the stores, because a later store may
3929   // have caused an earlier store's load to become unsplittable and if it is
3930   // unsplittable for the later store, then we can't rely on it being split in
3931   // the earlier store either.
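       // For example (illustrative only): if load %l feeds stores %s1 and %s2, and
       // %s2's split offsets do not line up with %l's, the check above marks %l as
       // unsplittable; this second pass then drops %s1 as well, even if its own
       // offsets matched.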
3932   llvm::erase_if(Stores, [&UnsplittableLoads](StoreInst *SI) {
3933     auto *LI = cast<LoadInst>(SI->getValueOperand());
3934     return UnsplittableLoads.count(LI);
3935   });
3936   // Once we've established all the loads that can't be split for some reason,
3937   // filter any that made it into our list out.
3938   llvm::erase_if(Loads, [&UnsplittableLoads](LoadInst *LI) {
3939     return UnsplittableLoads.count(LI);
3940   });
3941 
3942   // If no loads or stores are left, there is no pre-splitting to be done for
3943   // this alloca.
3944   if (Loads.empty() && Stores.empty())
3945     return false;
3946 
3947   // From here on, we can't fail and will be building new accesses, so rig up
3948   // an IR builder.
3949   IRBuilderTy IRB(&AI);
3950 
3951   // Collect the new slices which we will merge into the alloca slices.
3952   SmallVector<Slice, 4> NewSlices;
3953 
3954   // Track any allocas we end up splitting loads and stores for so we iterate
3955   // on them.
3956   SmallPtrSet<AllocaInst *, 4> ResplitPromotableAllocas;
3957 
3958   // At this point, we have collected all of the loads and stores we can
3959   // pre-split, and the specific splits needed for them. We actually do the
3960   // splitting in a specific order to handle the case when one of the loads
3961   // is the value operand to one of the stores.
3962   //
3963   // First, we rewrite all of the split loads, and just accumulate each split
3964   // load in a parallel structure. We also build the slices for them and append
3965   // them to the alloca slices.
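       //
       // Purely illustrative sketch (the value names and the byte-4 boundary are
       // made up, not taken from any particular test case): a whole-value copy
       // such as
       //
       //   %v = load i64, i64* %a
       //   store i64 %v, i64* %other
       //
       // with a partition boundary at byte 4 is rewritten into two halves,
       // roughly
       //
       //   %v.0 = load i32, i32* %a.0
       //   %v.4 = load i32, i32* %a.4
       //   store i32 %v.0, i32* %other.0
       //   store i32 %v.4, i32* %other.4
       //
       // so that each half can later be promoted on its own.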
3966   SmallDenseMap<LoadInst *, std::vector<LoadInst *>, 1> SplitLoadsMap;
3967   std::vector<LoadInst *> SplitLoads;
3968   const DataLayout &DL = AI.getModule()->getDataLayout();
3969   for (LoadInst *LI : Loads) {
3970     SplitLoads.clear();
3971 
3972     IntegerType *Ty = cast<IntegerType>(LI->getType());
3973     assert(Ty->getBitWidth() % 8 == 0);
3974     uint64_t LoadSize = Ty->getBitWidth() / 8;
3975     assert(LoadSize > 0 && "Cannot have a zero-sized integer load!");
3976 
3977     auto &Offsets = SplitOffsetsMap[LI];
3978     assert(LoadSize == Offsets.S->endOffset() - Offsets.S->beginOffset() &&
3979            "Slice size should always match load size exactly!");
3980     uint64_t BaseOffset = Offsets.S->beginOffset();
3981     assert(BaseOffset + LoadSize > BaseOffset &&
3982            "Cannot represent alloca access size using 64-bit integers!");
3983 
3984     Instruction *BasePtr = cast<Instruction>(LI->getPointerOperand());
3985     IRB.SetInsertPoint(LI);
3986 
3987     LLVM_DEBUG(dbgs() << "  Splitting load: " << *LI << "\n");
3988 
3989     uint64_t PartOffset = 0, PartSize = Offsets.Splits.front();
3990     int Idx = 0, Size = Offsets.Splits.size();
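         // Worked example (illustrative numbers): with LoadSize == 8 and
         // Offsets.Splits == {2, 4}, the loop below emits parts covering [0,2),
         // [2,4) and [4,8); the final part's size comes from the total load size
         // rather than from an explicit split offset.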
3991     for (;;) {
3992       auto *PartTy = Type::getIntNTy(Ty->getContext(), PartSize * 8);
3993       auto AS = LI->getPointerAddressSpace();
3994       auto *PartPtrTy = PartTy->getPointerTo(AS);
3995       LoadInst *PLoad = IRB.CreateAlignedLoad(
3996           PartTy,
3997           getAdjustedPtr(IRB, DL, BasePtr,
3998                          APInt(DL.getIndexSizeInBits(AS), PartOffset),
3999                          PartPtrTy, BasePtr->getName() + "."),
4000           getAdjustedAlignment(LI, PartOffset),
4001           /*IsVolatile*/ false, LI->getName());
4002       PLoad->copyMetadata(*LI, {LLVMContext::MD_mem_parallel_loop_access,
4003                                 LLVMContext::MD_access_group});
4004 
4005       // Append this load onto the list of split loads so we can find it later
4006       // to rewrite the stores.
4007       SplitLoads.push_back(PLoad);
4008 
4009       // Now build a new slice for the alloca.
4010       NewSlices.push_back(
4011           Slice(BaseOffset + PartOffset, BaseOffset + PartOffset + PartSize,
4012                 &PLoad->getOperandUse(PLoad->getPointerOperandIndex()),
4013                 /*IsSplittable*/ false));
4014       LLVM_DEBUG(dbgs() << "    new slice [" << NewSlices.back().beginOffset()
4015                         << ", " << NewSlices.back().endOffset()
4016                         << "): " << *PLoad << "\n");
4017 
4018       // See if we've handled all the splits.
4019       if (Idx >= Size)
4020         break;
4021 
4022       // Setup the next partition.
4023       PartOffset = Offsets.Splits[Idx];
4024       ++Idx;
4025       PartSize = (Idx < Size ? Offsets.Splits[Idx] : LoadSize) - PartOffset;
4026     }
4027 
4028     // Now that we have the split loads, do the slow walk over all uses of the
4029     // load and rewrite them as split stores, or save the split loads to use
4030     // below if the store is going to be split there anyway.
4031     bool DeferredStores = false;
4032     for (User *LU : LI->users()) {
4033       StoreInst *SI = cast<StoreInst>(LU);
4034       if (!Stores.empty() && SplitOffsetsMap.count(SI)) {
4035         DeferredStores = true;
4036         LLVM_DEBUG(dbgs() << "    Deferred splitting of store: " << *SI
4037                           << "\n");
4038         continue;
4039       }
4040 
4041       Value *StoreBasePtr = SI->getPointerOperand();
4042       IRB.SetInsertPoint(SI);
4043 
4044       LLVM_DEBUG(dbgs() << "    Splitting store of load: " << *SI << "\n");
4045 
4046       for (int Idx = 0, Size = SplitLoads.size(); Idx < Size; ++Idx) {
4047         LoadInst *PLoad = SplitLoads[Idx];
4048         uint64_t PartOffset = Idx == 0 ? 0 : Offsets.Splits[Idx - 1];
4049         auto *PartPtrTy =
4050             PLoad->getType()->getPointerTo(SI->getPointerAddressSpace());
4051 
4052         auto AS = SI->getPointerAddressSpace();
4053         StoreInst *PStore = IRB.CreateAlignedStore(
4054             PLoad,
4055             getAdjustedPtr(IRB, DL, StoreBasePtr,
4056                            APInt(DL.getIndexSizeInBits(AS), PartOffset),
4057                            PartPtrTy, StoreBasePtr->getName() + "."),
4058             getAdjustedAlignment(SI, PartOffset),
4059             /*IsVolatile*/ false);
4060         PStore->copyMetadata(*SI, {LLVMContext::MD_mem_parallel_loop_access,
4061                                    LLVMContext::MD_access_group});
4062         LLVM_DEBUG(dbgs() << "      +" << PartOffset << ":" << *PStore << "\n");
4063       }
4064 
4065       // We want to immediately iterate on any allocas impacted by splitting
4066       // this store, and we have to track any promotable alloca (indicated by
4067       // a direct store) as needing to be resplit because it is no longer
4068       // promotable.
4069       if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(StoreBasePtr)) {
4070         ResplitPromotableAllocas.insert(OtherAI);
4071         Worklist.insert(OtherAI);
4072       } else if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(
4073                      StoreBasePtr->stripInBoundsOffsets())) {
4074         Worklist.insert(OtherAI);
4075       }
4076 
4077       // Mark the original store as dead.
4078       DeadInsts.push_back(SI);
4079     }
4080 
4081     // Save the split loads if there are deferred stores among the users.
4082     if (DeferredStores)
4083       SplitLoadsMap.insert(std::make_pair(LI, std::move(SplitLoads)));
4084 
4085     // Mark the original load as dead and kill the original slice.
4086     DeadInsts.push_back(LI);
4087     Offsets.S->kill();
4088   }
4089 
4090   // Second, we rewrite all of the split stores. At this point, we know that
4091   // all loads from this alloca have been split already. For stores of such
4092   // loads, we can simply look up the pre-existing split loads. For stores of
4093   // other loads, we split those loads first and then write split stores of
4094   // them.
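       // For illustration (hypothetical scenario): if a store writes an i64 that
       // was loaded from some unrelated pointer, and this alloca requires a split
       // at byte 4, we materialize two fresh i32 loads at the original load's
       // position and two i32 stores here; the original wide load is only marked
       // dead if this store was its sole user.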
4095   for (StoreInst *SI : Stores) {
4096     auto *LI = cast<LoadInst>(SI->getValueOperand());
4097     IntegerType *Ty = cast<IntegerType>(LI->getType());
4098     assert(Ty->getBitWidth() % 8 == 0);
4099     uint64_t StoreSize = Ty->getBitWidth() / 8;
4100     assert(StoreSize > 0 && "Cannot have a zero-sized integer store!");
4101 
4102     auto &Offsets = SplitOffsetsMap[SI];
4103     assert(StoreSize == Offsets.S->endOffset() - Offsets.S->beginOffset() &&
4104            "Slice size should always match store size exactly!");
4105     uint64_t BaseOffset = Offsets.S->beginOffset();
4106     assert(BaseOffset + StoreSize > BaseOffset &&
4107            "Cannot represent alloca access size using 64-bit integers!");
4108 
4109     Value *LoadBasePtr = LI->getPointerOperand();
4110     Instruction *StoreBasePtr = cast<Instruction>(SI->getPointerOperand());
4111 
4112     LLVM_DEBUG(dbgs() << "  Splitting store: " << *SI << "\n");
4113 
4114     // Check whether we have an already split load.
4115     auto SplitLoadsMapI = SplitLoadsMap.find(LI);
4116     std::vector<LoadInst *> *SplitLoads = nullptr;
4117     if (SplitLoadsMapI != SplitLoadsMap.end()) {
4118       SplitLoads = &SplitLoadsMapI->second;
4119       assert(SplitLoads->size() == Offsets.Splits.size() + 1 &&
4120              "Too few split loads for the number of splits in the store!");
4121     } else {
4122       LLVM_DEBUG(dbgs() << "          of load: " << *LI << "\n");
4123     }
4124 
4125     uint64_t PartOffset = 0, PartSize = Offsets.Splits.front();
4126     int Idx = 0, Size = Offsets.Splits.size();
4127     for (;;) {
4128       auto *PartTy = Type::getIntNTy(Ty->getContext(), PartSize * 8);
4129       auto *LoadPartPtrTy = PartTy->getPointerTo(LI->getPointerAddressSpace());
4130       auto *StorePartPtrTy = PartTy->getPointerTo(SI->getPointerAddressSpace());
4131 
4132       // Either lookup a split load or create one.
4133       LoadInst *PLoad;
4134       if (SplitLoads) {
4135         PLoad = (*SplitLoads)[Idx];
4136       } else {
4137         IRB.SetInsertPoint(LI);
4138         auto AS = LI->getPointerAddressSpace();
4139         PLoad = IRB.CreateAlignedLoad(
4140             PartTy,
4141             getAdjustedPtr(IRB, DL, LoadBasePtr,
4142                            APInt(DL.getIndexSizeInBits(AS), PartOffset),
4143                            LoadPartPtrTy, LoadBasePtr->getName() + "."),
4144             getAdjustedAlignment(LI, PartOffset),
4145             /*IsVolatile*/ false, LI->getName());
4146         PLoad->copyMetadata(*LI, {LLVMContext::MD_mem_parallel_loop_access,
4147                                   LLVMContext::MD_access_group});
4148       }
4149 
4150       // And store this partition.
4151       IRB.SetInsertPoint(SI);
4152       auto AS = SI->getPointerAddressSpace();
4153       StoreInst *PStore = IRB.CreateAlignedStore(
4154           PLoad,
4155           getAdjustedPtr(IRB, DL, StoreBasePtr,
4156                          APInt(DL.getIndexSizeInBits(AS), PartOffset),
4157                          StorePartPtrTy, StoreBasePtr->getName() + "."),
4158           getAdjustedAlignment(SI, PartOffset),
4159           /*IsVolatile*/ false);
4160       PStore->copyMetadata(*SI, {LLVMContext::MD_mem_parallel_loop_access,
4161                                  LLVMContext::MD_access_group});
4162 
4163       // Now build a new slice for the alloca.
4164       NewSlices.push_back(
4165           Slice(BaseOffset + PartOffset, BaseOffset + PartOffset + PartSize,
4166                 &PStore->getOperandUse(PStore->getPointerOperandIndex()),
4167                 /*IsSplittable*/ false));
4168       LLVM_DEBUG(dbgs() << "    new slice [" << NewSlices.back().beginOffset()
4169                         << ", " << NewSlices.back().endOffset()
4170                         << "): " << *PStore << "\n");
4171       if (!SplitLoads) {
4172         LLVM_DEBUG(dbgs() << "      of split load: " << *PLoad << "\n");
4173       }
4174 
4175       // See if we've finished all the splits.
4176       if (Idx >= Size)
4177         break;
4178 
4179       // Setup the next partition.
4180       PartOffset = Offsets.Splits[Idx];
4181       ++Idx;
4182       PartSize = (Idx < Size ? Offsets.Splits[Idx] : StoreSize) - PartOffset;
4183     }
4184 
4185     // We want to immediately iterate on any allocas impacted by splitting
4186     // this load, which is only relevant if it isn't a load of this alloca and
4187     // thus we didn't already split the loads above. We also have to keep track
4188     // of any promotable allocas we split loads on as they can no longer be
4189     // promoted.
4190     if (!SplitLoads) {
4191       if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(LoadBasePtr)) {
4192         assert(OtherAI != &AI && "We can't re-split our own alloca!");
4193         ResplitPromotableAllocas.insert(OtherAI);
4194         Worklist.insert(OtherAI);
4195       } else if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(
4196                      LoadBasePtr->stripInBoundsOffsets())) {
4197         assert(OtherAI != &AI && "We can't re-split our own alloca!");
4198         Worklist.insert(OtherAI);
4199       }
4200     }
4201 
4202     // Mark the original store as dead now that we've split it up and kill its
4203     // slice. Note that we leave the original load in place unless this store
4204     // was its only use. It may in turn be split up if it is a load from some
4205     // other alloca, or it may be a normal load. This may introduce
4206     // redundant loads, but where those can be merged the rest of the optimizer
4207     // should handle the merging, and this uncovers SSA splits which is more
4208     // important. In practice, the original loads will almost always be fully
4209     // split and removed eventually, and the splits will be merged by any
4210     // trivial CSE, including instcombine.
4211     if (LI->hasOneUse()) {
4212       assert(*LI->user_begin() == SI && "Single use isn't this store!");
4213       DeadInsts.push_back(LI);
4214     }
4215     DeadInsts.push_back(SI);
4216     Offsets.S->kill();
4217   }
4218 
4219   // Remove the killed slices that have been pre-split.
4220   llvm::erase_if(AS, [](const Slice &S) { return S.isDead(); });
4221 
4222   // Insert our new slices. This will sort and merge them into the sorted
4223   // sequence.
4224   AS.insert(NewSlices);
4225 
4226   LLVM_DEBUG(dbgs() << "  Pre-split slices:\n");
4227 #ifndef NDEBUG
4228   for (auto I = AS.begin(), E = AS.end(); I != E; ++I)
4229     LLVM_DEBUG(AS.print(dbgs(), I, "    "));
4230 #endif
4231 
4232   // Finally, don't try to promote any allocas that now require re-splitting.
4233   // They have already been added to the worklist above.
4234   llvm::erase_if(PromotableAllocas, [&](AllocaInst *AI) {
4235     return ResplitPromotableAllocas.count(AI);
4236   });
4237 
4238   return true;
4239 }
4240 
4241 /// Rewrite an alloca partition's users.
4242 ///
4243 /// This routine drives both of the rewriting goals of the SROA pass. It tries
4244 /// to rewrite uses of an alloca partition to be conducive for SSA value
4245 /// promotion. If the partition needs a new, more refined alloca, this will
4246 /// build that new alloca, preserving as much type information as possible, and
4247 /// rewrite the uses of the old alloca to point at the new one and have the
4248 /// appropriate new offsets. It also evaluates how successful the rewrite was
4249 /// at enabling promotion and if it was successful queues the alloca to be
4250 /// promoted.
4251 AllocaInst *SROAPass::rewritePartition(AllocaInst &AI, AllocaSlices &AS,
4252                                        Partition &P) {
4253   // Try to compute a friendly type for this partition of the alloca. This
4254   // won't always succeed, in which case we fall back to a legal integer type
4255   // or an i8 array of an appropriate size.
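       // For example (illustrative only): a partition covering bytes [0,8) of an
       // alloca of type { i64, i32 } whose uses all operate on i64 picks i64; if
       // no common type or matching subtype is found, we fall back to a legal
       // integer of the partition's width, and finally to an [N x i8] array.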
4256   Type *SliceTy = nullptr;
4257   const DataLayout &DL = AI.getModule()->getDataLayout();
4258   std::pair<Type *, IntegerType *> CommonUseTy =
4259       findCommonType(P.begin(), P.end(), P.endOffset());
4260   // Do all uses operate on the same type?
4261   if (CommonUseTy.first)
4262     if (DL.getTypeAllocSize(CommonUseTy.first).getFixedSize() >= P.size())
4263       SliceTy = CommonUseTy.first;
4264   // If not, can we find an appropriate subtype in the original allocated type?
4265   if (!SliceTy)
4266     if (Type *TypePartitionTy = getTypePartition(DL, AI.getAllocatedType(),
4267                                                  P.beginOffset(), P.size()))
4268       SliceTy = TypePartitionTy;
4269   // If still not, can we use the largest bitwidth integer type used?
4270   if (!SliceTy && CommonUseTy.second)
4271     if (DL.getTypeAllocSize(CommonUseTy.second).getFixedSize() >= P.size())
4272       SliceTy = CommonUseTy.second;
4273   if ((!SliceTy || (SliceTy->isArrayTy() &&
4274                     SliceTy->getArrayElementType()->isIntegerTy())) &&
4275       DL.isLegalInteger(P.size() * 8))
4276     SliceTy = Type::getIntNTy(*C, P.size() * 8);
4277   if (!SliceTy)
4278     SliceTy = ArrayType::get(Type::getInt8Ty(*C), P.size());
4279   assert(DL.getTypeAllocSize(SliceTy).getFixedSize() >= P.size());
4280 
4281   bool IsIntegerPromotable = isIntegerWideningViable(P, SliceTy, DL);
4282 
4283   VectorType *VecTy =
4284       IsIntegerPromotable ? nullptr : isVectorPromotionViable(P, DL);
4285   if (VecTy)
4286     SliceTy = VecTy;
4287 
4288   // Check for the case where we're going to rewrite to a new alloca of the
4289   // exact same type as the original, and with the same access offsets. In that
4290   // case, re-use the existing alloca, but still run through the rewriter to
4291   // perform phi and select speculation.
4292   // P.beginOffset() can be non-zero even with the same type in a case with
4293   // out-of-bounds access (e.g. @PR35657 function in SROA/basictest.ll).
4294   AllocaInst *NewAI;
4295   if (SliceTy == AI.getAllocatedType() && P.beginOffset() == 0) {
4296     NewAI = &AI;
4297     // FIXME: We should be able to bail at this point with "nothing changed".
4298     // FIXME: We might want to defer PHI speculation until after here.
4299     // FIXME: return nullptr;
4300   } else {
4301     // Make sure the alignment is compatible with P.beginOffset().
4302     const Align Alignment = commonAlignment(AI.getAlign(), P.beginOffset());
4303     // If we will get at least this much alignment from the type alone, leave
4304     // the alloca's alignment unconstrained.
4305     const bool IsUnconstrained = Alignment <= DL.getABITypeAlign(SliceTy);
4306     NewAI = new AllocaInst(
4307         SliceTy, AI.getType()->getAddressSpace(), nullptr,
4308         IsUnconstrained ? DL.getPrefTypeAlign(SliceTy) : Alignment,
4309         AI.getName() + ".sroa." + Twine(P.begin() - AS.begin()), &AI);
4310     // Copy the old AI debug location over to the new one.
4311     NewAI->setDebugLoc(AI.getDebugLoc());
4312     ++NumNewAllocas;
4313   }
4314 
4315   LLVM_DEBUG(dbgs() << "Rewriting alloca partition "
4316                     << "[" << P.beginOffset() << "," << P.endOffset()
4317                     << ") to: " << *NewAI << "\n");
4318 
4319   // Track the high watermark on the worklist as it is only relevant for
4320   // promoted allocas. We will reset it to this point if the alloca is not in
4321   // fact scheduled for promotion.
4322   unsigned PPWOldSize = PostPromotionWorklist.size();
4323   unsigned NumUses = 0;
4324   SmallSetVector<PHINode *, 8> PHIUsers;
4325   SmallSetVector<SelectInst *, 8> SelectUsers;
4326 
4327   AllocaSliceRewriter Rewriter(DL, AS, *this, AI, *NewAI, P.beginOffset(),
4328                                P.endOffset(), IsIntegerPromotable, VecTy,
4329                                PHIUsers, SelectUsers);
4330   bool Promotable = true;
4331   for (Slice *S : P.splitSliceTails()) {
4332     Promotable &= Rewriter.visit(S);
4333     ++NumUses;
4334   }
4335   for (Slice &S : P) {
4336     Promotable &= Rewriter.visit(&S);
4337     ++NumUses;
4338   }
4339 
4340   NumAllocaPartitionUses += NumUses;
4341   MaxUsesPerAllocaPartition.updateMax(NumUses);
4342 
4343   // Now that we've processed all the slices in the new partition, check if any
4344   // PHIs or Selects would block promotion.
4345   for (PHINode *PHI : PHIUsers)
4346     if (!isSafePHIToSpeculate(*PHI)) {
4347       Promotable = false;
4348       PHIUsers.clear();
4349       SelectUsers.clear();
4350       break;
4351     }
4352 
4353   for (SelectInst *Sel : SelectUsers)
4354     if (!isSafeSelectToSpeculate(*Sel)) {
4355       Promotable = false;
4356       PHIUsers.clear();
4357       SelectUsers.clear();
4358       break;
4359     }
4360 
4361   if (Promotable) {
4362     for (Use *U : AS.getDeadUsesIfPromotable()) {
4363       auto *OldInst = dyn_cast<Instruction>(U->get());
4364       Value::dropDroppableUse(*U);
4365       if (OldInst)
4366         if (isInstructionTriviallyDead(OldInst))
4367           DeadInsts.push_back(OldInst);
4368     }
4369     if (PHIUsers.empty() && SelectUsers.empty()) {
4370       // Promote the alloca.
4371       PromotableAllocas.push_back(NewAI);
4372     } else {
4373       // If we have either PHIs or Selects to speculate, add them to those
4374       // worklists and re-queue the new alloca so that we promote it on the
4375       // next iteration.
4376       for (PHINode *PHIUser : PHIUsers)
4377         SpeculatablePHIs.insert(PHIUser);
4378       for (SelectInst *SelectUser : SelectUsers)
4379         SpeculatableSelects.insert(SelectUser);
4380       Worklist.insert(NewAI);
4381     }
4382   } else {
4383     // Drop any post-promotion work items if promotion didn't happen.
4384     while (PostPromotionWorklist.size() > PPWOldSize)
4385       PostPromotionWorklist.pop_back();
4386 
4387     // We couldn't promote and we didn't create a new partition, nothing
4388     // happened.
4389     if (NewAI == &AI)
4390       return nullptr;
4391 
4392     // If we can't promote the alloca, iterate on it to check for new
4393     // refinements exposed by splitting the current alloca. Don't iterate on an
4394     // alloca which didn't actually change and didn't get promoted.
4395     Worklist.insert(NewAI);
4396   }
4397 
4398   return NewAI;
4399 }
4400 
4401 /// Walks the slices of an alloca and forms partitions based on them,
4402 /// rewriting each of their uses.
4403 bool SROAPass::splitAlloca(AllocaInst &AI, AllocaSlices &AS) {
4404   if (AS.begin() == AS.end())
4405     return false;
4406 
4407   unsigned NumPartitions = 0;
4408   bool Changed = false;
4409   const DataLayout &DL = AI.getModule()->getDataLayout();
4410 
4411   // First try to pre-split loads and stores.
4412   Changed |= presplitLoadsAndStores(AI, AS);
4413 
4414   // Now that we have identified any pre-splitting opportunities,
4415   // mark loads and stores unsplittable except for the following case.
4416   // We leave a slice splittable if all other slices are disjoint or fully
4417   // included in the slice, such as whole-alloca loads and stores.
4418   // If we fail to split these during pre-splitting, we want to force them
4419   // to be rewritten into a partition.
4420   bool IsSorted = true;
4421 
4422   uint64_t AllocaSize =
4423       DL.getTypeAllocSize(AI.getAllocatedType()).getFixedSize();
4424   const uint64_t MaxBitVectorSize = 1024;
4425   if (AllocaSize <= MaxBitVectorSize) {
4426     // An offset strictly inside any load or store is not a valid split point;
4427     // slices starting or ending at such an offset are made unsplittable below.
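         // For example (illustrative only): an i32 load over bytes [0,4) clears
         // offsets 1, 2 and 3 in the SplittableOffset bit vector below, so any
         // splittable slice that begins or ends at one of those offsets is forced
         // to stay whole.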
4428     SmallBitVector SplittableOffset(AllocaSize + 1, true);
4429     for (Slice &S : AS)
4430       for (unsigned O = S.beginOffset() + 1;
4431            O < S.endOffset() && O < AllocaSize; O++)
4432         SplittableOffset.reset(O);
4433 
4434     for (Slice &S : AS) {
4435       if (!S.isSplittable())
4436         continue;
4437 
4438       if ((S.beginOffset() > AllocaSize || SplittableOffset[S.beginOffset()]) &&
4439           (S.endOffset() > AllocaSize || SplittableOffset[S.endOffset()]))
4440         continue;
4441 
4442       if (isa<LoadInst>(S.getUse()->getUser()) ||
4443           isa<StoreInst>(S.getUse()->getUser())) {
4444         S.makeUnsplittable();
4445         IsSorted = false;
4446       }
4447     }
4448   } else {
4450     // We only allow whole-alloca splittable loads and stores
4451     // for a large alloca to avoid creating an overly large BitVector.
4452     for (Slice &S : AS) {
4453       if (!S.isSplittable())
4454         continue;
4455 
4456       if (S.beginOffset() == 0 && S.endOffset() >= AllocaSize)
4457         continue;
4458 
4459       if (isa<LoadInst>(S.getUse()->getUser()) ||
4460           isa<StoreInst>(S.getUse()->getUser())) {
4461         S.makeUnsplittable();
4462         IsSorted = false;
4463       }
4464     }
4465   }
4466 
4467   if (!IsSorted)
4468     llvm::sort(AS);
4469 
4470   /// Describes the allocas introduced by rewritePartition in order to migrate
4471   /// the debug info.
4472   struct Fragment {
4473     AllocaInst *Alloca;
4474     uint64_t Offset;
4475     uint64_t Size;
4476     Fragment(AllocaInst *AI, uint64_t O, uint64_t S)
4477       : Alloca(AI), Offset(O), Size(S) {}
4478   };
4479   SmallVector<Fragment, 4> Fragments;
4480 
4481   // Rewrite each partition.
4482   for (auto &P : AS.partitions()) {
4483     if (AllocaInst *NewAI = rewritePartition(AI, AS, P)) {
4484       Changed = true;
4485       if (NewAI != &AI) {
4486         uint64_t SizeOfByte = 8;
4487         uint64_t AllocaSize =
4488             DL.getTypeSizeInBits(NewAI->getAllocatedType()).getFixedSize();
4489         // Don't include any padding.
4490         uint64_t Size = std::min(AllocaSize, P.size() * SizeOfByte);
4491         Fragments.push_back(Fragment(NewAI, P.beginOffset() * SizeOfByte, Size));
4492       }
4493     }
4494     ++NumPartitions;
4495   }
4496 
4497   NumAllocaPartitions += NumPartitions;
4498   MaxPartitionsPerAlloca.updateMax(NumPartitions);
4499 
4500   // Migrate debug information from the old alloca to the new alloca(s)
4501   // and the individual partitions.
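       // For illustration (hypothetical example): a dbg.declare describing a
       // 16-byte variable whose alloca was split into two 8-byte pieces is
       // replaced by two dbg.declares whose expressions carry DW_OP_LLVM_fragment
       // operands covering bits [0, 64) and [64, 128) respectively.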
4502   TinyPtrVector<DbgVariableIntrinsic *> DbgDeclares = FindDbgAddrUses(&AI);
4503   for (DbgVariableIntrinsic *DbgDeclare : DbgDeclares) {
4504     auto *Expr = DbgDeclare->getExpression();
4505     DIBuilder DIB(*AI.getModule(), /*AllowUnresolved*/ false);
4506     uint64_t AllocaSize =
4507         DL.getTypeSizeInBits(AI.getAllocatedType()).getFixedSize();
4508     for (auto Fragment : Fragments) {
4509       // Create a fragment expression describing the new partition or reuse AI's
4510       // expression if there is only one partition.
4511       auto *FragmentExpr = Expr;
4512       if (Fragment.Size < AllocaSize || Expr->isFragment()) {
4513         // If this alloca is already a scalar replacement of a larger aggregate,
4514         // Fragment.Offset describes the offset inside the scalar.
4515         auto ExprFragment = Expr->getFragmentInfo();
4516         uint64_t Offset = ExprFragment ? ExprFragment->OffsetInBits : 0;
4517         uint64_t Start = Offset + Fragment.Offset;
4518         uint64_t Size = Fragment.Size;
4519         if (ExprFragment) {
4520           uint64_t AbsEnd =
4521               ExprFragment->OffsetInBits + ExprFragment->SizeInBits;
4522           if (Start >= AbsEnd)
4523             // No need to describe a SROAed padding.
4524             continue;
4525           Size = std::min(Size, AbsEnd - Start);
4526         }
4527         // The new, smaller fragment is stenciled out from the old fragment.
4528         if (auto OrigFragment = FragmentExpr->getFragmentInfo()) {
4529           assert(Start >= OrigFragment->OffsetInBits &&
4530                  "new fragment is outside of original fragment");
4531           Start -= OrigFragment->OffsetInBits;
4532         }
4533 
4534         // The alloca may be larger than the variable.
4535         auto VarSize = DbgDeclare->getVariable()->getSizeInBits();
4536         if (VarSize) {
4537           if (Size > *VarSize)
4538             Size = *VarSize;
4539           if (Size == 0 || Start + Size > *VarSize)
4540             continue;
4541         }
4542 
4543         // Avoid creating a fragment expression that covers the entire variable.
4544         if (!VarSize || *VarSize != Size) {
4545           if (auto E =
4546                   DIExpression::createFragmentExpression(Expr, Start, Size))
4547             FragmentExpr = *E;
4548           else
4549             continue;
4550         }
4551       }
4552 
4553       // Remove any existing intrinsics on the new alloca describing
4554       // the variable fragment.
4555       for (DbgVariableIntrinsic *OldDII : FindDbgAddrUses(Fragment.Alloca)) {
4556         auto SameVariableFragment = [](const DbgVariableIntrinsic *LHS,
4557                                        const DbgVariableIntrinsic *RHS) {
4558           return LHS->getVariable() == RHS->getVariable() &&
4559                  LHS->getDebugLoc()->getInlinedAt() ==
4560                      RHS->getDebugLoc()->getInlinedAt();
4561         };
4562         if (SameVariableFragment(OldDII, DbgDeclare))
4563           OldDII->eraseFromParent();
4564       }
4565 
4566       DIB.insertDeclare(Fragment.Alloca, DbgDeclare->getVariable(), FragmentExpr,
4567                         DbgDeclare->getDebugLoc(), &AI);
4568     }
4569   }
4570   return Changed;
4571 }
4572 
4573 /// Clobber a use with poison, deleting the used value if it becomes dead.
4574 void SROAPass::clobberUse(Use &U) {
4575   Value *OldV = U;
4576   // Replace the use with a poison value.
4577   U = PoisonValue::get(OldV->getType());
4578 
4579   // Check for this making an instruction dead. We have to garbage collect
4580   // all the dead instructions to ensure the uses of any alloca end up being
4581   // minimal.
4582   if (Instruction *OldI = dyn_cast<Instruction>(OldV))
4583     if (isInstructionTriviallyDead(OldI)) {
4584       DeadInsts.push_back(OldI);
4585     }
4586 }
4587 
4588 /// Analyze an alloca for SROA.
4589 ///
4590 /// This analyzes the alloca to ensure we can reason about it, builds
4591 /// the slices of the alloca, and then hands it off to be split and
4592 /// rewritten as needed.
4593 bool SROAPass::runOnAlloca(AllocaInst &AI) {
4594   LLVM_DEBUG(dbgs() << "SROA alloca: " << AI << "\n");
4595   ++NumAllocasAnalyzed;
4596 
4597   // Special case dead allocas, as they're trivial.
4598   if (AI.use_empty()) {
4599     AI.eraseFromParent();
4600     return true;
4601   }
4602   const DataLayout &DL = AI.getModule()->getDataLayout();
4603 
4604   // Skip alloca forms that this analysis can't handle.
4605   auto *AT = AI.getAllocatedType();
4606   if (AI.isArrayAllocation() || !AT->isSized() || isa<ScalableVectorType>(AT) ||
4607       DL.getTypeAllocSize(AT).getFixedSize() == 0)
4608     return false;
4609 
4610   bool Changed = false;
4611 
4612   // First, split any FCA loads and stores touching this alloca to expose
4613   // better splitting and promotion opportunities.
4614   IRBuilderTy IRB(&AI);
4615   AggLoadStoreRewriter AggRewriter(DL, IRB);
4616   Changed |= AggRewriter.rewrite(AI);
4617 
4618   // Build the slices using a recursive instruction-visiting builder.
4619   AllocaSlices AS(DL, AI);
4620   LLVM_DEBUG(AS.print(dbgs()));
4621   if (AS.isEscaped())
4622     return Changed;
4623 
4624   // Delete all the dead users of this alloca before splitting and rewriting it.
4625   for (Instruction *DeadUser : AS.getDeadUsers()) {
4626     // Free up everything used by this instruction.
4627     for (Use &DeadOp : DeadUser->operands())
4628       clobberUse(DeadOp);
4629 
4630     // Now replace the uses of this instruction.
4631     DeadUser->replaceAllUsesWith(PoisonValue::get(DeadUser->getType()));
4632 
4633     // And mark it for deletion.
4634     DeadInsts.push_back(DeadUser);
4635     Changed = true;
4636   }
4637   for (Use *DeadOp : AS.getDeadOperands()) {
4638     clobberUse(*DeadOp);
4639     Changed = true;
4640   }
4641 
4642   // No slices to split. Leave the dead alloca for a later pass to clean up.
4643   if (AS.begin() == AS.end())
4644     return Changed;
4645 
4646   Changed |= splitAlloca(AI, AS);
4647 
4648   LLVM_DEBUG(dbgs() << "  Speculating PHIs\n");
4649   while (!SpeculatablePHIs.empty())
4650     speculatePHINodeLoads(IRB, *SpeculatablePHIs.pop_back_val());
4651 
4652   LLVM_DEBUG(dbgs() << "  Speculating Selects\n");
4653   while (!SpeculatableSelects.empty())
4654     speculateSelectInstLoads(IRB, *SpeculatableSelects.pop_back_val());
4655 
4656   return Changed;
4657 }
4658 
4659 /// Delete the dead instructions accumulated in this run.
4660 ///
4661 /// Recursively deletes the dead instructions we've accumulated. This is done
4662 /// at the very end to maximize locality of the recursive delete and to
4663 /// minimize the problems of invalidated instruction pointers as such pointers
4664 /// are used heavily in the intermediate stages of the algorithm.
4665 ///
4666 /// We also record the alloca instructions deleted here so that they aren't
4667 /// subsequently handed to mem2reg to promote.
4668 bool SROAPass::deleteDeadInstructions(
4669     SmallPtrSetImpl<AllocaInst *> &DeletedAllocas) {
4670   bool Changed = false;
4671   while (!DeadInsts.empty()) {
4672     Instruction *I = dyn_cast_or_null<Instruction>(DeadInsts.pop_back_val());
4673     if (!I) continue;
4674     LLVM_DEBUG(dbgs() << "Deleting dead instruction: " << *I << "\n");
4675 
4676     // If the instruction is an alloca, find the possible dbg.declare connected
4677     // to it, and remove it too. We must do this before calling RAUW or we will
4678     // not be able to find it.
4679     if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
4680       DeletedAllocas.insert(AI);
4681       for (DbgVariableIntrinsic *OldDII : FindDbgAddrUses(AI))
4682         OldDII->eraseFromParent();
4683     }
4684 
4685     I->replaceAllUsesWith(UndefValue::get(I->getType()));
4686 
4687     for (Use &Operand : I->operands())
4688       if (Instruction *U = dyn_cast<Instruction>(Operand)) {
4689         // Zero out the operand and see if it becomes trivially dead.
4690         Operand = nullptr;
4691         if (isInstructionTriviallyDead(U))
4692           DeadInsts.push_back(U);
4693       }
4694 
4695     ++NumDeleted;
4696     I->eraseFromParent();
4697     Changed = true;
4698   }
4699   return Changed;
4700 }
4701 
4702 /// Promote the allocas, using the best available technique.
4703 ///
4704 /// This attempts to promote whatever allocas have been identified as viable in
4705 /// the PromotableAllocas list. If that list is empty, there is nothing to do.
4706 /// This function returns whether any promotion occurred.
4707 bool SROAPass::promoteAllocas(Function &F) {
4708   if (PromotableAllocas.empty())
4709     return false;
4710 
4711   NumPromoted += PromotableAllocas.size();
4712 
4713   LLVM_DEBUG(dbgs() << "Promoting allocas with mem2reg...\n");
4714   PromoteMemToReg(PromotableAllocas, *DT, AC);
4715   PromotableAllocas.clear();
4716   return true;
4717 }
4718 
4719 PreservedAnalyses SROAPass::runImpl(Function &F, DominatorTree &RunDT,
4720                                     AssumptionCache &RunAC) {
4721   LLVM_DEBUG(dbgs() << "SROA function: " << F.getName() << "\n");
4722   C = &F.getContext();
4723   DT = &RunDT;
4724   AC = &RunAC;
4725 
4726   BasicBlock &EntryBB = F.getEntryBlock();
4727   for (BasicBlock::iterator I = EntryBB.begin(), E = std::prev(EntryBB.end());
4728        I != E; ++I) {
4729     if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
4730       if (isa<ScalableVectorType>(AI->getAllocatedType())) {
4731         if (isAllocaPromotable(AI))
4732           PromotableAllocas.push_back(AI);
4733       } else {
4734         Worklist.insert(AI);
4735       }
4736     }
4737   }
4738 
4739   bool Changed = false;
4740   // A set of deleted alloca instruction pointers which should be removed from
4741   // the list of promotable allocas.
4742   SmallPtrSet<AllocaInst *, 4> DeletedAllocas;
4743 
4744   do {
4745     while (!Worklist.empty()) {
4746       Changed |= runOnAlloca(*Worklist.pop_back_val());
4747       Changed |= deleteDeadInstructions(DeletedAllocas);
4748 
4749       // Remove the deleted allocas from various lists so that we don't try to
4750       // continue processing them.
4751       if (!DeletedAllocas.empty()) {
4752         auto IsInSet = [&](AllocaInst *AI) { return DeletedAllocas.count(AI); };
4753         Worklist.remove_if(IsInSet);
4754         PostPromotionWorklist.remove_if(IsInSet);
4755         llvm::erase_if(PromotableAllocas, IsInSet);
4756         DeletedAllocas.clear();
4757       }
4758     }
4759 
4760     Changed |= promoteAllocas(F);
4761 
4762     Worklist = PostPromotionWorklist;
4763     PostPromotionWorklist.clear();
4764   } while (!Worklist.empty());
4765 
4766   if (!Changed)
4767     return PreservedAnalyses::all();
4768 
4769   PreservedAnalyses PA;
4770   PA.preserveSet<CFGAnalyses>();
4771   return PA;
4772 }
4773 
4774 PreservedAnalyses SROAPass::run(Function &F, FunctionAnalysisManager &AM) {
4775   return runImpl(F, AM.getResult<DominatorTreeAnalysis>(F),
4776                  AM.getResult<AssumptionAnalysis>(F));
4777 }
4778 
4779 /// A legacy pass for the legacy pass manager that wraps the \c SROA pass.
4780 ///
4781 /// This is in the llvm namespace purely to allow it to be a friend of the \c
4782 /// SROA pass.
4783 class llvm::sroa::SROALegacyPass : public FunctionPass {
4784   /// The SROA implementation.
4785   SROAPass Impl;
4786 
4787 public:
4788   static char ID;
4789 
4790   SROALegacyPass() : FunctionPass(ID) {
4791     initializeSROALegacyPassPass(*PassRegistry::getPassRegistry());
4792   }
4793 
4794   bool runOnFunction(Function &F) override {
4795     if (skipFunction(F))
4796       return false;
4797 
4798     auto PA = Impl.runImpl(
4799         F, getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
4800         getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
4801     return !PA.areAllPreserved();
4802   }
4803 
4804   void getAnalysisUsage(AnalysisUsage &AU) const override {
4805     AU.addRequired<AssumptionCacheTracker>();
4806     AU.addRequired<DominatorTreeWrapperPass>();
4807     AU.addPreserved<GlobalsAAWrapperPass>();
4808     AU.setPreservesCFG();
4809   }
4810 
4811   StringRef getPassName() const override { return "SROA"; }
4812 };
4813 
4814 char SROALegacyPass::ID = 0;
4815 
4816 FunctionPass *llvm::createSROAPass() { return new SROALegacyPass(); }
4817 
4818 INITIALIZE_PASS_BEGIN(SROALegacyPass, "sroa",
4819                       "Scalar Replacement Of Aggregates", false, false)
4820 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
4821 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
4822 INITIALIZE_PASS_END(SROALegacyPass, "sroa", "Scalar Replacement Of Aggregates",
4823                     false, false)
4824