1 //===- SROA.cpp - Scalar Replacement Of Aggregates ------------------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 /// \file 10 /// This transformation implements the well known scalar replacement of 11 /// aggregates transformation. It tries to identify promotable elements of an 12 /// aggregate alloca, and promote them to registers. It will also try to 13 /// convert uses of an element (or set of elements) of an alloca into a vector 14 /// or bitfield-style integer scalar if appropriate. 15 /// 16 /// It works to do this with minimal slicing of the alloca so that regions 17 /// which are merely transferred in and out of external memory remain unchanged 18 /// and are not decomposed to scalar code. 19 /// 20 /// Because this also performs alloca promotion, it can be thought of as also 21 /// serving the purpose of SSA formation. The algorithm iterates on the 22 /// function until all opportunities for promotion have been realized. 23 /// 24 //===----------------------------------------------------------------------===// 25 26 #include "llvm/Transforms/Scalar/SROA.h" 27 #include "llvm/ADT/STLExtras.h" 28 #include "llvm/ADT/SetVector.h" 29 #include "llvm/ADT/SmallVector.h" 30 #include "llvm/ADT/Statistic.h" 31 #include "llvm/Analysis/AssumptionCache.h" 32 #include "llvm/Analysis/GlobalsModRef.h" 33 #include "llvm/Analysis/Loads.h" 34 #include "llvm/Analysis/PtrUseVisitor.h" 35 #include "llvm/Analysis/ValueTracking.h" 36 #include "llvm/IR/Constants.h" 37 #include "llvm/IR/DIBuilder.h" 38 #include "llvm/IR/DataLayout.h" 39 #include "llvm/IR/DebugInfo.h" 40 #include "llvm/IR/DerivedTypes.h" 41 #include "llvm/IR/IRBuilder.h" 42 #include "llvm/IR/InstVisitor.h" 43 #include "llvm/IR/Instructions.h" 44 #include "llvm/IR/IntrinsicInst.h" 45 #include "llvm/IR/LLVMContext.h" 46 #include "llvm/IR/Operator.h" 47 #include "llvm/Pass.h" 48 #include "llvm/Support/Chrono.h" 49 #include "llvm/Support/CommandLine.h" 50 #include "llvm/Support/Compiler.h" 51 #include "llvm/Support/Debug.h" 52 #include "llvm/Support/ErrorHandling.h" 53 #include "llvm/Support/MathExtras.h" 54 #include "llvm/Support/raw_ostream.h" 55 #include "llvm/Transforms/Scalar.h" 56 #include "llvm/Transforms/Utils/Local.h" 57 #include "llvm/Transforms/Utils/PromoteMemToReg.h" 58 59 #ifndef NDEBUG 60 // We only use this for a debug check. 61 #include <random> 62 #endif 63 64 using namespace llvm; 65 using namespace llvm::sroa; 66 67 #define DEBUG_TYPE "sroa" 68 69 STATISTIC(NumAllocasAnalyzed, "Number of allocas analyzed for replacement"); 70 STATISTIC(NumAllocaPartitions, "Number of alloca partitions formed"); 71 STATISTIC(MaxPartitionsPerAlloca, "Maximum number of partitions per alloca"); 72 STATISTIC(NumAllocaPartitionUses, "Number of alloca partition uses rewritten"); 73 STATISTIC(MaxUsesPerAllocaPartition, "Maximum number of uses of a partition"); 74 STATISTIC(NumNewAllocas, "Number of new, smaller allocas introduced"); 75 STATISTIC(NumPromoted, "Number of allocas promoted to SSA values"); 76 STATISTIC(NumLoadsSpeculated, "Number of loads speculated to allow promotion"); 77 STATISTIC(NumDeleted, "Number of instructions deleted"); 78 STATISTIC(NumVectorized, "Number of vectorized aggregates"); 79 80 /// Hidden option to enable randomly shuffling the slices to help uncover 81 /// instability in their order. 
82 static cl::opt<bool> SROARandomShuffleSlices("sroa-random-shuffle-slices", 83 cl::init(false), cl::Hidden); 84 85 /// Hidden option to experiment with completely strict handling of inbounds 86 /// GEPs. 87 static cl::opt<bool> SROAStrictInbounds("sroa-strict-inbounds", cl::init(false), 88 cl::Hidden); 89 90 namespace { 91 /// \brief A custom IRBuilder inserter which prefixes all names, but only in 92 /// Assert builds. 93 class IRBuilderPrefixedInserter : public IRBuilderDefaultInserter { 94 std::string Prefix; 95 const Twine getNameWithPrefix(const Twine &Name) const { 96 return Name.isTriviallyEmpty() ? Name : Prefix + Name; 97 } 98 99 public: 100 void SetNamePrefix(const Twine &P) { Prefix = P.str(); } 101 102 protected: 103 void InsertHelper(Instruction *I, const Twine &Name, BasicBlock *BB, 104 BasicBlock::iterator InsertPt) const { 105 IRBuilderDefaultInserter::InsertHelper(I, getNameWithPrefix(Name), BB, 106 InsertPt); 107 } 108 }; 109 110 /// \brief Provide a typedef for IRBuilder that drops names in release builds. 111 using IRBuilderTy = llvm::IRBuilder<ConstantFolder, IRBuilderPrefixedInserter>; 112 } 113 114 namespace { 115 /// \brief A used slice of an alloca. 116 /// 117 /// This structure represents a slice of an alloca used by some instruction. It 118 /// stores both the begin and end offsets of this use, a pointer to the use 119 /// itself, and a flag indicating whether we can classify the use as splittable 120 /// or not when forming partitions of the alloca. 121 class Slice { 122 /// \brief The beginning offset of the range. 123 uint64_t BeginOffset; 124 125 /// \brief The ending offset, not included in the range. 126 uint64_t EndOffset; 127 128 /// \brief Storage for both the use of this slice and whether it can be 129 /// split. 130 PointerIntPair<Use *, 1, bool> UseAndIsSplittable; 131 132 public: 133 Slice() : BeginOffset(), EndOffset() {} 134 Slice(uint64_t BeginOffset, uint64_t EndOffset, Use *U, bool IsSplittable) 135 : BeginOffset(BeginOffset), EndOffset(EndOffset), 136 UseAndIsSplittable(U, IsSplittable) {} 137 138 uint64_t beginOffset() const { return BeginOffset; } 139 uint64_t endOffset() const { return EndOffset; } 140 141 bool isSplittable() const { return UseAndIsSplittable.getInt(); } 142 void makeUnsplittable() { UseAndIsSplittable.setInt(false); } 143 144 Use *getUse() const { return UseAndIsSplittable.getPointer(); } 145 146 bool isDead() const { return getUse() == nullptr; } 147 void kill() { UseAndIsSplittable.setPointer(nullptr); } 148 149 /// \brief Support for ordering ranges. 150 /// 151 /// This provides an ordering over ranges such that start offsets are 152 /// always increasing, and within equal start offsets, the end offsets are 153 /// decreasing. Thus the spanning range comes first in a cluster with the 154 /// same start position. 155 bool operator<(const Slice &RHS) const { 156 if (beginOffset() < RHS.beginOffset()) 157 return true; 158 if (beginOffset() > RHS.beginOffset()) 159 return false; 160 if (isSplittable() != RHS.isSplittable()) 161 return !isSplittable(); 162 if (endOffset() > RHS.endOffset()) 163 return true; 164 return false; 165 } 166 167 /// \brief Support comparison with a single offset to allow binary searches. 
168 friend LLVM_ATTRIBUTE_UNUSED bool operator<(const Slice &LHS, 169 uint64_t RHSOffset) { 170 return LHS.beginOffset() < RHSOffset; 171 } 172 friend LLVM_ATTRIBUTE_UNUSED bool operator<(uint64_t LHSOffset, 173 const Slice &RHS) { 174 return LHSOffset < RHS.beginOffset(); 175 } 176 177 bool operator==(const Slice &RHS) const { 178 return isSplittable() == RHS.isSplittable() && 179 beginOffset() == RHS.beginOffset() && endOffset() == RHS.endOffset(); 180 } 181 bool operator!=(const Slice &RHS) const { return !operator==(RHS); } 182 }; 183 } // end anonymous namespace 184 185 namespace llvm { 186 template <typename T> struct isPodLike; 187 template <> struct isPodLike<Slice> { static const bool value = true; }; 188 } 189 190 /// \brief Representation of the alloca slices. 191 /// 192 /// This class represents the slices of an alloca which are formed by its 193 /// various uses. If a pointer escapes, we can't fully build a representation 194 /// for the slices used and we reflect that in this structure. The uses are 195 /// stored, sorted by increasing beginning offset and with unsplittable slices 196 /// starting at a particular offset before splittable slices. 197 class llvm::sroa::AllocaSlices { 198 public: 199 /// \brief Construct the slices of a particular alloca. 200 AllocaSlices(const DataLayout &DL, AllocaInst &AI); 201 202 /// \brief Test whether a pointer to the allocation escapes our analysis. 203 /// 204 /// If this is true, the slices are never fully built and should be 205 /// ignored. 206 bool isEscaped() const { return PointerEscapingInstr; } 207 208 /// \brief Support for iterating over the slices. 209 /// @{ 210 typedef SmallVectorImpl<Slice>::iterator iterator; 211 typedef iterator_range<iterator> range; 212 iterator begin() { return Slices.begin(); } 213 iterator end() { return Slices.end(); } 214 215 typedef SmallVectorImpl<Slice>::const_iterator const_iterator; 216 typedef iterator_range<const_iterator> const_range; 217 const_iterator begin() const { return Slices.begin(); } 218 const_iterator end() const { return Slices.end(); } 219 /// @} 220 221 /// \brief Erase a range of slices. 222 void erase(iterator Start, iterator Stop) { Slices.erase(Start, Stop); } 223 224 /// \brief Insert new slices for this alloca. 225 /// 226 /// This moves the slices into the alloca's slices collection, and re-sorts 227 /// everything so that the usual ordering properties of the alloca's slices 228 /// hold. 229 void insert(ArrayRef<Slice> NewSlices) { 230 int OldSize = Slices.size(); 231 Slices.append(NewSlices.begin(), NewSlices.end()); 232 auto SliceI = Slices.begin() + OldSize; 233 std::sort(SliceI, Slices.end()); 234 std::inplace_merge(Slices.begin(), SliceI, Slices.end()); 235 } 236 237 // Forward declare the iterator and range accessor for walking the 238 // partitions. 239 class partition_iterator; 240 iterator_range<partition_iterator> partitions(); 241 242 /// \brief Access the dead users for this alloca. 243 ArrayRef<Instruction *> getDeadUsers() const { return DeadUsers; } 244 245 /// \brief Access the dead operands referring to this alloca. 246 /// 247 /// These are operands which have cannot actually be used to refer to the 248 /// alloca as they are outside its range and the user doesn't correct for 249 /// that. These mostly consist of PHI node inputs and the like which we just 250 /// need to replace with undef. 
251 ArrayRef<Use *> getDeadOperands() const { return DeadOperands; } 252 253 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 254 void print(raw_ostream &OS, const_iterator I, StringRef Indent = " ") const; 255 void printSlice(raw_ostream &OS, const_iterator I, 256 StringRef Indent = " ") const; 257 void printUse(raw_ostream &OS, const_iterator I, 258 StringRef Indent = " ") const; 259 void print(raw_ostream &OS) const; 260 void dump(const_iterator I) const; 261 void dump() const; 262 #endif 263 264 private: 265 template <typename DerivedT, typename RetT = void> class BuilderBase; 266 class SliceBuilder; 267 friend class AllocaSlices::SliceBuilder; 268 269 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 270 /// \brief Handle to alloca instruction to simplify method interfaces. 271 AllocaInst &AI; 272 #endif 273 274 /// \brief The instruction responsible for this alloca not having a known set 275 /// of slices. 276 /// 277 /// When an instruction (potentially) escapes the pointer to the alloca, we 278 /// store a pointer to that here and abort trying to form slices of the 279 /// alloca. This will be null if the alloca slices are analyzed successfully. 280 Instruction *PointerEscapingInstr; 281 282 /// \brief The slices of the alloca. 283 /// 284 /// We store a vector of the slices formed by uses of the alloca here. This 285 /// vector is sorted by increasing begin offset, and then the unsplittable 286 /// slices before the splittable ones. See the Slice inner class for more 287 /// details. 288 SmallVector<Slice, 8> Slices; 289 290 /// \brief Instructions which will become dead if we rewrite the alloca. 291 /// 292 /// Note that these are not separated by slice. This is because we expect an 293 /// alloca to be completely rewritten or not rewritten at all. If rewritten, 294 /// all these instructions can simply be removed and replaced with undef as 295 /// they come from outside of the allocated space. 296 SmallVector<Instruction *, 8> DeadUsers; 297 298 /// \brief Operands which will become dead if we rewrite the alloca. 299 /// 300 /// These are operands that in their particular use can be replaced with 301 /// undef when we rewrite the alloca. These show up in out-of-bounds inputs 302 /// to PHI nodes and the like. They aren't entirely dead (there might be 303 /// a GEP back into the bounds using it elsewhere) and nor is the PHI, but we 304 /// want to swap this particular input for undef to simplify the use lists of 305 /// the alloca. 306 SmallVector<Use *, 8> DeadOperands; 307 }; 308 309 /// \brief A partition of the slices. 310 /// 311 /// An ephemeral representation for a range of slices which can be viewed as 312 /// a partition of the alloca. This range represents a span of the alloca's 313 /// memory which cannot be split, and provides access to all of the slices 314 /// overlapping some part of the partition. 315 /// 316 /// Objects of this type are produced by traversing the alloca's slices, but 317 /// are only ephemeral and not persistent. 318 class llvm::sroa::Partition { 319 private: 320 friend class AllocaSlices; 321 friend class AllocaSlices::partition_iterator; 322 323 typedef AllocaSlices::iterator iterator; 324 325 /// \brief The beginning and ending offsets of the alloca for this 326 /// partition. 327 uint64_t BeginOffset, EndOffset; 328 329 /// \brief The start and end iterators of this partition. 330 iterator SI, SJ; 331 332 /// \brief A collection of split slice tails overlapping the partition. 
333 SmallVector<Slice *, 4> SplitTails; 334 335 /// \brief Raw constructor builds an empty partition starting and ending at 336 /// the given iterator. 337 Partition(iterator SI) : SI(SI), SJ(SI) {} 338 339 public: 340 /// \brief The start offset of this partition. 341 /// 342 /// All of the contained slices start at or after this offset. 343 uint64_t beginOffset() const { return BeginOffset; } 344 345 /// \brief The end offset of this partition. 346 /// 347 /// All of the contained slices end at or before this offset. 348 uint64_t endOffset() const { return EndOffset; } 349 350 /// \brief The size of the partition. 351 /// 352 /// Note that this can never be zero. 353 uint64_t size() const { 354 assert(BeginOffset < EndOffset && "Partitions must span some bytes!"); 355 return EndOffset - BeginOffset; 356 } 357 358 /// \brief Test whether this partition contains no slices, and merely spans 359 /// a region occupied by split slices. 360 bool empty() const { return SI == SJ; } 361 362 /// \name Iterate slices that start within the partition. 363 /// These may be splittable or unsplittable. They have a begin offset >= the 364 /// partition begin offset. 365 /// @{ 366 // FIXME: We should probably define a "concat_iterator" helper and use that 367 // to stitch together pointee_iterators over the split tails and the 368 // contiguous iterators of the partition. That would give a much nicer 369 // interface here. We could then additionally expose filtered iterators for 370 // split, unsplit, and unsplittable splices based on the usage patterns. 371 iterator begin() const { return SI; } 372 iterator end() const { return SJ; } 373 /// @} 374 375 /// \brief Get the sequence of split slice tails. 376 /// 377 /// These tails are of slices which start before this partition but are 378 /// split and overlap into the partition. We accumulate these while forming 379 /// partitions. 380 ArrayRef<Slice *> splitSliceTails() const { return SplitTails; } 381 }; 382 383 /// \brief An iterator over partitions of the alloca's slices. 384 /// 385 /// This iterator implements the core algorithm for partitioning the alloca's 386 /// slices. It is a forward iterator as we don't support backtracking for 387 /// efficiency reasons, and re-use a single storage area to maintain the 388 /// current set of split slices. 389 /// 390 /// It is templated on the slice iterator type to use so that it can operate 391 /// with either const or non-const slice iterators. 392 class AllocaSlices::partition_iterator 393 : public iterator_facade_base<partition_iterator, std::forward_iterator_tag, 394 Partition> { 395 friend class AllocaSlices; 396 397 /// \brief Most of the state for walking the partitions is held in a class 398 /// with a nice interface for examining them. 399 Partition P; 400 401 /// \brief We need to keep the end of the slices to know when to stop. 402 AllocaSlices::iterator SE; 403 404 /// \brief We also need to keep track of the maximum split end offset seen. 405 /// FIXME: Do we really? 406 uint64_t MaxSplitSliceEndOffset; 407 408 /// \brief Sets the partition to be empty at given iterator, and sets the 409 /// end iterator. 410 partition_iterator(AllocaSlices::iterator SI, AllocaSlices::iterator SE) 411 : P(SI), SE(SE), MaxSplitSliceEndOffset(0) { 412 // If not already at the end, advance our state to form the initial 413 // partition. 414 if (SI != SE) 415 advance(); 416 } 417 418 /// \brief Advance the iterator to the next partition. 419 /// 420 /// Requires that the iterator not be at the end of the slices. 
421 void advance() { 422 assert((P.SI != SE || !P.SplitTails.empty()) && 423 "Cannot advance past the end of the slices!"); 424 425 // Clear out any split uses which have ended. 426 if (!P.SplitTails.empty()) { 427 if (P.EndOffset >= MaxSplitSliceEndOffset) { 428 // If we've finished all splits, this is easy. 429 P.SplitTails.clear(); 430 MaxSplitSliceEndOffset = 0; 431 } else { 432 // Remove the uses which have ended in the prior partition. This 433 // cannot change the max split slice end because we just checked that 434 // the prior partition ended prior to that max. 435 P.SplitTails.erase( 436 remove_if(P.SplitTails, 437 [&](Slice *S) { return S->endOffset() <= P.EndOffset; }), 438 P.SplitTails.end()); 439 assert(any_of(P.SplitTails, 440 [&](Slice *S) { 441 return S->endOffset() == MaxSplitSliceEndOffset; 442 }) && 443 "Could not find the current max split slice offset!"); 444 assert(all_of(P.SplitTails, 445 [&](Slice *S) { 446 return S->endOffset() <= MaxSplitSliceEndOffset; 447 }) && 448 "Max split slice end offset is not actually the max!"); 449 } 450 } 451 452 // If P.SI is already at the end, then we've cleared the split tail and 453 // now have an end iterator. 454 if (P.SI == SE) { 455 assert(P.SplitTails.empty() && "Failed to clear the split slices!"); 456 return; 457 } 458 459 // If we had a non-empty partition previously, set up the state for 460 // subsequent partitions. 461 if (P.SI != P.SJ) { 462 // Accumulate all the splittable slices which started in the old 463 // partition into the split list. 464 for (Slice &S : P) 465 if (S.isSplittable() && S.endOffset() > P.EndOffset) { 466 P.SplitTails.push_back(&S); 467 MaxSplitSliceEndOffset = 468 std::max(S.endOffset(), MaxSplitSliceEndOffset); 469 } 470 471 // Start from the end of the previous partition. 472 P.SI = P.SJ; 473 474 // If P.SI is now at the end, we at most have a tail of split slices. 475 if (P.SI == SE) { 476 P.BeginOffset = P.EndOffset; 477 P.EndOffset = MaxSplitSliceEndOffset; 478 return; 479 } 480 481 // If the we have split slices and the next slice is after a gap and is 482 // not splittable immediately form an empty partition for the split 483 // slices up until the next slice begins. 484 if (!P.SplitTails.empty() && P.SI->beginOffset() != P.EndOffset && 485 !P.SI->isSplittable()) { 486 P.BeginOffset = P.EndOffset; 487 P.EndOffset = P.SI->beginOffset(); 488 return; 489 } 490 } 491 492 // OK, we need to consume new slices. Set the end offset based on the 493 // current slice, and step SJ past it. The beginning offset of the 494 // partition is the beginning offset of the next slice unless we have 495 // pre-existing split slices that are continuing, in which case we begin 496 // at the prior end offset. 497 P.BeginOffset = P.SplitTails.empty() ? P.SI->beginOffset() : P.EndOffset; 498 P.EndOffset = P.SI->endOffset(); 499 ++P.SJ; 500 501 // There are two strategies to form a partition based on whether the 502 // partition starts with an unsplittable slice or a splittable slice. 503 if (!P.SI->isSplittable()) { 504 // When we're forming an unsplittable region, it must always start at 505 // the first slice and will extend through its end. 506 assert(P.BeginOffset == P.SI->beginOffset()); 507 508 // Form a partition including all of the overlapping slices with this 509 // unsplittable slice. 
510 while (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset) { 511 if (!P.SJ->isSplittable()) 512 P.EndOffset = std::max(P.EndOffset, P.SJ->endOffset()); 513 ++P.SJ; 514 } 515 516 // We have a partition across a set of overlapping unsplittable 517 // partitions. 518 return; 519 } 520 521 // If we're starting with a splittable slice, then we need to form 522 // a synthetic partition spanning it and any other overlapping splittable 523 // splices. 524 assert(P.SI->isSplittable() && "Forming a splittable partition!"); 525 526 // Collect all of the overlapping splittable slices. 527 while (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset && 528 P.SJ->isSplittable()) { 529 P.EndOffset = std::max(P.EndOffset, P.SJ->endOffset()); 530 ++P.SJ; 531 } 532 533 // Back upiP.EndOffset if we ended the span early when encountering an 534 // unsplittable slice. This synthesizes the early end offset of 535 // a partition spanning only splittable slices. 536 if (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset) { 537 assert(!P.SJ->isSplittable()); 538 P.EndOffset = P.SJ->beginOffset(); 539 } 540 } 541 542 public: 543 bool operator==(const partition_iterator &RHS) const { 544 assert(SE == RHS.SE && 545 "End iterators don't match between compared partition iterators!"); 546 547 // The observed positions of partitions is marked by the P.SI iterator and 548 // the emptiness of the split slices. The latter is only relevant when 549 // P.SI == SE, as the end iterator will additionally have an empty split 550 // slices list, but the prior may have the same P.SI and a tail of split 551 // slices. 552 if (P.SI == RHS.P.SI && P.SplitTails.empty() == RHS.P.SplitTails.empty()) { 553 assert(P.SJ == RHS.P.SJ && 554 "Same set of slices formed two different sized partitions!"); 555 assert(P.SplitTails.size() == RHS.P.SplitTails.size() && 556 "Same slice position with differently sized non-empty split " 557 "slice tails!"); 558 return true; 559 } 560 return false; 561 } 562 563 partition_iterator &operator++() { 564 advance(); 565 return *this; 566 } 567 568 Partition &operator*() { return P; } 569 }; 570 571 /// \brief A forward range over the partitions of the alloca's slices. 572 /// 573 /// This accesses an iterator range over the partitions of the alloca's 574 /// slices. It computes these partitions on the fly based on the overlapping 575 /// offsets of the slices and the ability to split them. It will visit "empty" 576 /// partitions to cover regions of the alloca only accessed via split 577 /// slices. 578 iterator_range<AllocaSlices::partition_iterator> AllocaSlices::partitions() { 579 return make_range(partition_iterator(begin(), end()), 580 partition_iterator(end(), end())); 581 } 582 583 static Value *foldSelectInst(SelectInst &SI) { 584 // If the condition being selected on is a constant or the same value is 585 // being selected between, fold the select. Yes this does (rarely) happen 586 // early on. 587 if (ConstantInt *CI = dyn_cast<ConstantInt>(SI.getCondition())) 588 return SI.getOperand(1 + CI->isZero()); 589 if (SI.getOperand(1) == SI.getOperand(2)) 590 return SI.getOperand(1); 591 592 return nullptr; 593 } 594 595 /// \brief A helper that folds a PHI node or a select. 596 static Value *foldPHINodeOrSelectInst(Instruction &I) { 597 if (PHINode *PN = dyn_cast<PHINode>(&I)) { 598 // If PN merges together the same value, return that value. 599 return PN->hasConstantValue(); 600 } 601 return foldSelectInst(cast<SelectInst>(I)); 602 } 603 604 /// \brief Builder for the alloca slices. 
605 /// 606 /// This class builds a set of alloca slices by recursively visiting the uses 607 /// of an alloca and making a slice for each load and store at each offset. 608 class AllocaSlices::SliceBuilder : public PtrUseVisitor<SliceBuilder> { 609 friend class PtrUseVisitor<SliceBuilder>; 610 friend class InstVisitor<SliceBuilder>; 611 typedef PtrUseVisitor<SliceBuilder> Base; 612 613 const uint64_t AllocSize; 614 AllocaSlices &AS; 615 616 SmallDenseMap<Instruction *, unsigned> MemTransferSliceMap; 617 SmallDenseMap<Instruction *, uint64_t> PHIOrSelectSizes; 618 619 /// \brief Set to de-duplicate dead instructions found in the use walk. 620 SmallPtrSet<Instruction *, 4> VisitedDeadInsts; 621 622 public: 623 SliceBuilder(const DataLayout &DL, AllocaInst &AI, AllocaSlices &AS) 624 : PtrUseVisitor<SliceBuilder>(DL), 625 AllocSize(DL.getTypeAllocSize(AI.getAllocatedType())), AS(AS) {} 626 627 private: 628 void markAsDead(Instruction &I) { 629 if (VisitedDeadInsts.insert(&I).second) 630 AS.DeadUsers.push_back(&I); 631 } 632 633 void insertUse(Instruction &I, const APInt &Offset, uint64_t Size, 634 bool IsSplittable = false) { 635 // Completely skip uses which have a zero size or start either before or 636 // past the end of the allocation. 637 if (Size == 0 || Offset.uge(AllocSize)) { 638 DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @" << Offset 639 << " which has zero size or starts outside of the " 640 << AllocSize << " byte alloca:\n" 641 << " alloca: " << AS.AI << "\n" 642 << " use: " << I << "\n"); 643 return markAsDead(I); 644 } 645 646 uint64_t BeginOffset = Offset.getZExtValue(); 647 uint64_t EndOffset = BeginOffset + Size; 648 649 // Clamp the end offset to the end of the allocation. Note that this is 650 // formulated to handle even the case where "BeginOffset + Size" overflows. 651 // This may appear superficially to be something we could ignore entirely, 652 // but that is not so! There may be widened loads or PHI-node uses where 653 // some instructions are dead but not others. We can't completely ignore 654 // them, and so have to record at least the information here. 655 assert(AllocSize >= BeginOffset); // Established above. 656 if (Size > AllocSize - BeginOffset) { 657 DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @" << Offset 658 << " to remain within the " << AllocSize << " byte alloca:\n" 659 << " alloca: " << AS.AI << "\n" 660 << " use: " << I << "\n"); 661 EndOffset = AllocSize; 662 } 663 664 AS.Slices.push_back(Slice(BeginOffset, EndOffset, U, IsSplittable)); 665 } 666 667 void visitBitCastInst(BitCastInst &BC) { 668 if (BC.use_empty()) 669 return markAsDead(BC); 670 671 return Base::visitBitCastInst(BC); 672 } 673 674 void visitGetElementPtrInst(GetElementPtrInst &GEPI) { 675 if (GEPI.use_empty()) 676 return markAsDead(GEPI); 677 678 if (SROAStrictInbounds && GEPI.isInBounds()) { 679 // FIXME: This is a manually un-factored variant of the basic code inside 680 // of GEPs with checking of the inbounds invariant specified in the 681 // langref in a very strict sense. If we ever want to enable 682 // SROAStrictInbounds, this code should be factored cleanly into 683 // PtrUseVisitor, but it is easier to experiment with SROAStrictInbounds 684 // by writing out the code here where we have the underlying allocation 685 // size readily available. 
686 APInt GEPOffset = Offset; 687 const DataLayout &DL = GEPI.getModule()->getDataLayout(); 688 for (gep_type_iterator GTI = gep_type_begin(GEPI), 689 GTE = gep_type_end(GEPI); 690 GTI != GTE; ++GTI) { 691 ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand()); 692 if (!OpC) 693 break; 694 695 // Handle a struct index, which adds its field offset to the pointer. 696 if (StructType *STy = GTI.getStructTypeOrNull()) { 697 unsigned ElementIdx = OpC->getZExtValue(); 698 const StructLayout *SL = DL.getStructLayout(STy); 699 GEPOffset += 700 APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx)); 701 } else { 702 // For array or vector indices, scale the index by the size of the 703 // type. 704 APInt Index = OpC->getValue().sextOrTrunc(Offset.getBitWidth()); 705 GEPOffset += Index * APInt(Offset.getBitWidth(), 706 DL.getTypeAllocSize(GTI.getIndexedType())); 707 } 708 709 // If this index has computed an intermediate pointer which is not 710 // inbounds, then the result of the GEP is a poison value and we can 711 // delete it and all uses. 712 if (GEPOffset.ugt(AllocSize)) 713 return markAsDead(GEPI); 714 } 715 } 716 717 return Base::visitGetElementPtrInst(GEPI); 718 } 719 720 void handleLoadOrStore(Type *Ty, Instruction &I, const APInt &Offset, 721 uint64_t Size, bool IsVolatile) { 722 // We allow splitting of non-volatile loads and stores where the type is an 723 // integer type. These may be used to implement 'memcpy' or other "transfer 724 // of bits" patterns. 725 bool IsSplittable = Ty->isIntegerTy() && !IsVolatile; 726 727 insertUse(I, Offset, Size, IsSplittable); 728 } 729 730 void visitLoadInst(LoadInst &LI) { 731 assert((!LI.isSimple() || LI.getType()->isSingleValueType()) && 732 "All simple FCA loads should have been pre-split"); 733 734 if (!IsOffsetKnown) 735 return PI.setAborted(&LI); 736 737 const DataLayout &DL = LI.getModule()->getDataLayout(); 738 uint64_t Size = DL.getTypeStoreSize(LI.getType()); 739 return handleLoadOrStore(LI.getType(), LI, Offset, Size, LI.isVolatile()); 740 } 741 742 void visitStoreInst(StoreInst &SI) { 743 Value *ValOp = SI.getValueOperand(); 744 if (ValOp == *U) 745 return PI.setEscapedAndAborted(&SI); 746 if (!IsOffsetKnown) 747 return PI.setAborted(&SI); 748 749 const DataLayout &DL = SI.getModule()->getDataLayout(); 750 uint64_t Size = DL.getTypeStoreSize(ValOp->getType()); 751 752 // If this memory access can be shown to *statically* extend outside the 753 // bounds of of the allocation, it's behavior is undefined, so simply 754 // ignore it. Note that this is more strict than the generic clamping 755 // behavior of insertUse. We also try to handle cases which might run the 756 // risk of overflow. 757 // FIXME: We should instead consider the pointer to have escaped if this 758 // function is being instrumented for addressing bugs or race conditions. 
759 if (Size > AllocSize || Offset.ugt(AllocSize - Size)) { 760 DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte store @" << Offset 761 << " which extends past the end of the " << AllocSize 762 << " byte alloca:\n" 763 << " alloca: " << AS.AI << "\n" 764 << " use: " << SI << "\n"); 765 return markAsDead(SI); 766 } 767 768 assert((!SI.isSimple() || ValOp->getType()->isSingleValueType()) && 769 "All simple FCA stores should have been pre-split"); 770 handleLoadOrStore(ValOp->getType(), SI, Offset, Size, SI.isVolatile()); 771 } 772 773 void visitMemSetInst(MemSetInst &II) { 774 assert(II.getRawDest() == *U && "Pointer use is not the destination?"); 775 ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength()); 776 if ((Length && Length->getValue() == 0) || 777 (IsOffsetKnown && Offset.uge(AllocSize))) 778 // Zero-length mem transfer intrinsics can be ignored entirely. 779 return markAsDead(II); 780 781 if (!IsOffsetKnown) 782 return PI.setAborted(&II); 783 784 insertUse(II, Offset, Length ? Length->getLimitedValue() 785 : AllocSize - Offset.getLimitedValue(), 786 (bool)Length); 787 } 788 789 void visitMemTransferInst(MemTransferInst &II) { 790 ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength()); 791 if (Length && Length->getValue() == 0) 792 // Zero-length mem transfer intrinsics can be ignored entirely. 793 return markAsDead(II); 794 795 // Because we can visit these intrinsics twice, also check to see if the 796 // first time marked this instruction as dead. If so, skip it. 797 if (VisitedDeadInsts.count(&II)) 798 return; 799 800 if (!IsOffsetKnown) 801 return PI.setAborted(&II); 802 803 // This side of the transfer is completely out-of-bounds, and so we can 804 // nuke the entire transfer. However, we also need to nuke the other side 805 // if already added to our partitions. 806 // FIXME: Yet another place we really should bypass this when 807 // instrumenting for ASan. 808 if (Offset.uge(AllocSize)) { 809 SmallDenseMap<Instruction *, unsigned>::iterator MTPI = 810 MemTransferSliceMap.find(&II); 811 if (MTPI != MemTransferSliceMap.end()) 812 AS.Slices[MTPI->second].kill(); 813 return markAsDead(II); 814 } 815 816 uint64_t RawOffset = Offset.getLimitedValue(); 817 uint64_t Size = Length ? Length->getLimitedValue() : AllocSize - RawOffset; 818 819 // Check for the special case where the same exact value is used for both 820 // source and dest. 821 if (*U == II.getRawDest() && *U == II.getRawSource()) { 822 // For non-volatile transfers this is a no-op. 823 if (!II.isVolatile()) 824 return markAsDead(II); 825 826 return insertUse(II, Offset, Size, /*IsSplittable=*/false); 827 } 828 829 // If we have seen both source and destination for a mem transfer, then 830 // they both point to the same alloca. 831 bool Inserted; 832 SmallDenseMap<Instruction *, unsigned>::iterator MTPI; 833 std::tie(MTPI, Inserted) = 834 MemTransferSliceMap.insert(std::make_pair(&II, AS.Slices.size())); 835 unsigned PrevIdx = MTPI->second; 836 if (!Inserted) { 837 Slice &PrevP = AS.Slices[PrevIdx]; 838 839 // Check if the begin offsets match and this is a non-volatile transfer. 840 // In that case, we can completely elide the transfer. 841 if (!II.isVolatile() && PrevP.beginOffset() == RawOffset) { 842 PrevP.kill(); 843 return markAsDead(II); 844 } 845 846 // Otherwise we have an offset transfer within the same alloca. We can't 847 // split those. 848 PrevP.makeUnsplittable(); 849 } 850 851 // Insert the use now that we've fixed up the splittable nature. 
852 insertUse(II, Offset, Size, /*IsSplittable=*/Inserted && Length); 853 854 // Check that we ended up with a valid index in the map. 855 assert(AS.Slices[PrevIdx].getUse()->getUser() == &II && 856 "Map index doesn't point back to a slice with this user."); 857 } 858 859 // Disable SRoA for any intrinsics except for lifetime invariants. 860 // FIXME: What about debug intrinsics? This matches old behavior, but 861 // doesn't make sense. 862 void visitIntrinsicInst(IntrinsicInst &II) { 863 if (!IsOffsetKnown) 864 return PI.setAborted(&II); 865 866 if (II.getIntrinsicID() == Intrinsic::lifetime_start || 867 II.getIntrinsicID() == Intrinsic::lifetime_end) { 868 ConstantInt *Length = cast<ConstantInt>(II.getArgOperand(0)); 869 uint64_t Size = std::min(AllocSize - Offset.getLimitedValue(), 870 Length->getLimitedValue()); 871 insertUse(II, Offset, Size, true); 872 return; 873 } 874 875 Base::visitIntrinsicInst(II); 876 } 877 878 Instruction *hasUnsafePHIOrSelectUse(Instruction *Root, uint64_t &Size) { 879 // We consider any PHI or select that results in a direct load or store of 880 // the same offset to be a viable use for slicing purposes. These uses 881 // are considered unsplittable and the size is the maximum loaded or stored 882 // size. 883 SmallPtrSet<Instruction *, 4> Visited; 884 SmallVector<std::pair<Instruction *, Instruction *>, 4> Uses; 885 Visited.insert(Root); 886 Uses.push_back(std::make_pair(cast<Instruction>(*U), Root)); 887 const DataLayout &DL = Root->getModule()->getDataLayout(); 888 // If there are no loads or stores, the access is dead. We mark that as 889 // a size zero access. 890 Size = 0; 891 do { 892 Instruction *I, *UsedI; 893 std::tie(UsedI, I) = Uses.pop_back_val(); 894 895 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 896 Size = std::max(Size, DL.getTypeStoreSize(LI->getType())); 897 continue; 898 } 899 if (StoreInst *SI = dyn_cast<StoreInst>(I)) { 900 Value *Op = SI->getOperand(0); 901 if (Op == UsedI) 902 return SI; 903 Size = std::max(Size, DL.getTypeStoreSize(Op->getType())); 904 continue; 905 } 906 907 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) { 908 if (!GEP->hasAllZeroIndices()) 909 return GEP; 910 } else if (!isa<BitCastInst>(I) && !isa<PHINode>(I) && 911 !isa<SelectInst>(I)) { 912 return I; 913 } 914 915 for (User *U : I->users()) 916 if (Visited.insert(cast<Instruction>(U)).second) 917 Uses.push_back(std::make_pair(I, cast<Instruction>(U))); 918 } while (!Uses.empty()); 919 920 return nullptr; 921 } 922 923 void visitPHINodeOrSelectInst(Instruction &I) { 924 assert(isa<PHINode>(I) || isa<SelectInst>(I)); 925 if (I.use_empty()) 926 return markAsDead(I); 927 928 // TODO: We could use SimplifyInstruction here to fold PHINodes and 929 // SelectInsts. However, doing so requires to change the current 930 // dead-operand-tracking mechanism. For instance, suppose neither loading 931 // from %U nor %other traps. Then "load (select undef, %U, %other)" does not 932 // trap either. However, if we simply replace %U with undef using the 933 // current dead-operand-tracking mechanism, "load (select undef, undef, 934 // %other)" may trap because the select may return the first operand 935 // "undef". 936 if (Value *Result = foldPHINodeOrSelectInst(I)) { 937 if (Result == *U) 938 // If the result of the constant fold will be the pointer, recurse 939 // through the PHI/select as if we had RAUW'ed it. 940 enqueueUsers(I); 941 else 942 // Otherwise the operand to the PHI/select is dead, and we can replace 943 // it with undef. 
944 AS.DeadOperands.push_back(U); 945 946 return; 947 } 948 949 if (!IsOffsetKnown) 950 return PI.setAborted(&I); 951 952 // See if we already have computed info on this node. 953 uint64_t &Size = PHIOrSelectSizes[&I]; 954 if (!Size) { 955 // This is a new PHI/Select, check for an unsafe use of it. 956 if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&I, Size)) 957 return PI.setAborted(UnsafeI); 958 } 959 960 // For PHI and select operands outside the alloca, we can't nuke the entire 961 // phi or select -- the other side might still be relevant, so we special 962 // case them here and use a separate structure to track the operands 963 // themselves which should be replaced with undef. 964 // FIXME: This should instead be escaped in the event we're instrumenting 965 // for address sanitization. 966 if (Offset.uge(AllocSize)) { 967 AS.DeadOperands.push_back(U); 968 return; 969 } 970 971 insertUse(I, Offset, Size); 972 } 973 974 void visitPHINode(PHINode &PN) { visitPHINodeOrSelectInst(PN); } 975 976 void visitSelectInst(SelectInst &SI) { visitPHINodeOrSelectInst(SI); } 977 978 /// \brief Disable SROA entirely if there are unhandled users of the alloca. 979 void visitInstruction(Instruction &I) { PI.setAborted(&I); } 980 }; 981 982 AllocaSlices::AllocaSlices(const DataLayout &DL, AllocaInst &AI) 983 : 984 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 985 AI(AI), 986 #endif 987 PointerEscapingInstr(nullptr) { 988 SliceBuilder PB(DL, AI, *this); 989 SliceBuilder::PtrInfo PtrI = PB.visitPtr(AI); 990 if (PtrI.isEscaped() || PtrI.isAborted()) { 991 // FIXME: We should sink the escape vs. abort info into the caller nicely, 992 // possibly by just storing the PtrInfo in the AllocaSlices. 993 PointerEscapingInstr = PtrI.getEscapingInst() ? PtrI.getEscapingInst() 994 : PtrI.getAbortingInst(); 995 assert(PointerEscapingInstr && "Did not track a bad instruction"); 996 return; 997 } 998 999 Slices.erase(remove_if(Slices, [](const Slice &S) { return S.isDead(); }), 1000 Slices.end()); 1001 1002 #ifndef NDEBUG 1003 if (SROARandomShuffleSlices) { 1004 std::mt19937 MT(static_cast<unsigned>( 1005 std::chrono::system_clock::now().time_since_epoch().count())); 1006 std::shuffle(Slices.begin(), Slices.end(), MT); 1007 } 1008 #endif 1009 1010 // Sort the uses. This arranges for the offsets to be in ascending order, 1011 // and the sizes to be in descending order. 1012 std::sort(Slices.begin(), Slices.end()); 1013 } 1014 1015 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1016 1017 void AllocaSlices::print(raw_ostream &OS, const_iterator I, 1018 StringRef Indent) const { 1019 printSlice(OS, I, Indent); 1020 OS << "\n"; 1021 printUse(OS, I, Indent); 1022 } 1023 1024 void AllocaSlices::printSlice(raw_ostream &OS, const_iterator I, 1025 StringRef Indent) const { 1026 OS << Indent << "[" << I->beginOffset() << "," << I->endOffset() << ")" 1027 << " slice #" << (I - begin()) 1028 << (I->isSplittable() ? 
" (splittable)" : ""); 1029 } 1030 1031 void AllocaSlices::printUse(raw_ostream &OS, const_iterator I, 1032 StringRef Indent) const { 1033 OS << Indent << " used by: " << *I->getUse()->getUser() << "\n"; 1034 } 1035 1036 void AllocaSlices::print(raw_ostream &OS) const { 1037 if (PointerEscapingInstr) { 1038 OS << "Can't analyze slices for alloca: " << AI << "\n" 1039 << " A pointer to this alloca escaped by:\n" 1040 << " " << *PointerEscapingInstr << "\n"; 1041 return; 1042 } 1043 1044 OS << "Slices of alloca: " << AI << "\n"; 1045 for (const_iterator I = begin(), E = end(); I != E; ++I) 1046 print(OS, I); 1047 } 1048 1049 LLVM_DUMP_METHOD void AllocaSlices::dump(const_iterator I) const { 1050 print(dbgs(), I); 1051 } 1052 LLVM_DUMP_METHOD void AllocaSlices::dump() const { print(dbgs()); } 1053 1054 #endif // !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1055 1056 /// Walk the range of a partitioning looking for a common type to cover this 1057 /// sequence of slices. 1058 static Type *findCommonType(AllocaSlices::const_iterator B, 1059 AllocaSlices::const_iterator E, 1060 uint64_t EndOffset) { 1061 Type *Ty = nullptr; 1062 bool TyIsCommon = true; 1063 IntegerType *ITy = nullptr; 1064 1065 // Note that we need to look at *every* alloca slice's Use to ensure we 1066 // always get consistent results regardless of the order of slices. 1067 for (AllocaSlices::const_iterator I = B; I != E; ++I) { 1068 Use *U = I->getUse(); 1069 if (isa<IntrinsicInst>(*U->getUser())) 1070 continue; 1071 if (I->beginOffset() != B->beginOffset() || I->endOffset() != EndOffset) 1072 continue; 1073 1074 Type *UserTy = nullptr; 1075 if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) { 1076 UserTy = LI->getType(); 1077 } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) { 1078 UserTy = SI->getValueOperand()->getType(); 1079 } 1080 1081 if (IntegerType *UserITy = dyn_cast_or_null<IntegerType>(UserTy)) { 1082 // If the type is larger than the partition, skip it. We only encounter 1083 // this for split integer operations where we want to use the type of the 1084 // entity causing the split. Also skip if the type is not a byte width 1085 // multiple. 1086 if (UserITy->getBitWidth() % 8 != 0 || 1087 UserITy->getBitWidth() / 8 > (EndOffset - B->beginOffset())) 1088 continue; 1089 1090 // Track the largest bitwidth integer type used in this way in case there 1091 // is no common type. 1092 if (!ITy || ITy->getBitWidth() < UserITy->getBitWidth()) 1093 ITy = UserITy; 1094 } 1095 1096 // To avoid depending on the order of slices, Ty and TyIsCommon must not 1097 // depend on types skipped above. 1098 if (!UserTy || (Ty && Ty != UserTy)) 1099 TyIsCommon = false; // Give up on anything but an iN type. 1100 else 1101 Ty = UserTy; 1102 } 1103 1104 return TyIsCommon ? Ty : ITy; 1105 } 1106 1107 /// PHI instructions that use an alloca and are subsequently loaded can be 1108 /// rewritten to load both input pointers in the pred blocks and then PHI the 1109 /// results, allowing the load of the alloca to be promoted. 1110 /// From this: 1111 /// %P2 = phi [i32* %Alloca, i32* %Other] 1112 /// %V = load i32* %P2 1113 /// to: 1114 /// %V1 = load i32* %Alloca -> will be mem2reg'd 1115 /// ... 1116 /// %V2 = load i32* %Other 1117 /// ... 1118 /// %V = phi [i32 %V1, i32 %V2] 1119 /// 1120 /// We can do this to a select if its only uses are loads and if the operands 1121 /// to the select can be loaded unconditionally. 
1122 /// 1123 /// FIXME: This should be hoisted into a generic utility, likely in 1124 /// Transforms/Util/Local.h 1125 static bool isSafePHIToSpeculate(PHINode &PN) { 1126 // For now, we can only do this promotion if the load is in the same block 1127 // as the PHI, and if there are no stores between the phi and load. 1128 // TODO: Allow recursive phi users. 1129 // TODO: Allow stores. 1130 BasicBlock *BB = PN.getParent(); 1131 unsigned MaxAlign = 0; 1132 bool HaveLoad = false; 1133 for (User *U : PN.users()) { 1134 LoadInst *LI = dyn_cast<LoadInst>(U); 1135 if (!LI || !LI->isSimple()) 1136 return false; 1137 1138 // For now we only allow loads in the same block as the PHI. This is 1139 // a common case that happens when instcombine merges two loads through 1140 // a PHI. 1141 if (LI->getParent() != BB) 1142 return false; 1143 1144 // Ensure that there are no instructions between the PHI and the load that 1145 // could store. 1146 for (BasicBlock::iterator BBI(PN); &*BBI != LI; ++BBI) 1147 if (BBI->mayWriteToMemory()) 1148 return false; 1149 1150 MaxAlign = std::max(MaxAlign, LI->getAlignment()); 1151 HaveLoad = true; 1152 } 1153 1154 if (!HaveLoad) 1155 return false; 1156 1157 const DataLayout &DL = PN.getModule()->getDataLayout(); 1158 1159 // We can only transform this if it is safe to push the loads into the 1160 // predecessor blocks. The only thing to watch out for is that we can't put 1161 // a possibly trapping load in the predecessor if it is a critical edge. 1162 for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) { 1163 TerminatorInst *TI = PN.getIncomingBlock(Idx)->getTerminator(); 1164 Value *InVal = PN.getIncomingValue(Idx); 1165 1166 // If the value is produced by the terminator of the predecessor (an 1167 // invoke) or it has side-effects, there is no valid place to put a load 1168 // in the predecessor. 1169 if (TI == InVal || TI->mayHaveSideEffects()) 1170 return false; 1171 1172 // If the predecessor has a single successor, then the edge isn't 1173 // critical. 1174 if (TI->getNumSuccessors() == 1) 1175 continue; 1176 1177 // If this pointer is always safe to load, or if we can prove that there 1178 // is already a load in the block, then we can move the load to the pred 1179 // block. 1180 if (isSafeToLoadUnconditionally(InVal, MaxAlign, DL, TI)) 1181 continue; 1182 1183 return false; 1184 } 1185 1186 return true; 1187 } 1188 1189 static void speculatePHINodeLoads(PHINode &PN) { 1190 DEBUG(dbgs() << " original: " << PN << "\n"); 1191 1192 Type *LoadTy = cast<PointerType>(PN.getType())->getElementType(); 1193 IRBuilderTy PHIBuilder(&PN); 1194 PHINode *NewPN = PHIBuilder.CreatePHI(LoadTy, PN.getNumIncomingValues(), 1195 PN.getName() + ".sroa.speculated"); 1196 1197 // Get the AA tags and alignment to use from one of the loads. It doesn't 1198 // matter which one we get and if any differ. 1199 LoadInst *SomeLoad = cast<LoadInst>(PN.user_back()); 1200 1201 AAMDNodes AATags; 1202 SomeLoad->getAAMetadata(AATags); 1203 unsigned Align = SomeLoad->getAlignment(); 1204 1205 // Rewrite all loads of the PN to use the new PHI. 1206 while (!PN.use_empty()) { 1207 LoadInst *LI = cast<LoadInst>(PN.user_back()); 1208 LI->replaceAllUsesWith(NewPN); 1209 LI->eraseFromParent(); 1210 } 1211 1212 // Inject loads into all of the pred blocks. 
1213 for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) { 1214 BasicBlock *Pred = PN.getIncomingBlock(Idx); 1215 TerminatorInst *TI = Pred->getTerminator(); 1216 Value *InVal = PN.getIncomingValue(Idx); 1217 IRBuilderTy PredBuilder(TI); 1218 1219 LoadInst *Load = PredBuilder.CreateLoad( 1220 InVal, (PN.getName() + ".sroa.speculate.load." + Pred->getName())); 1221 ++NumLoadsSpeculated; 1222 Load->setAlignment(Align); 1223 if (AATags) 1224 Load->setAAMetadata(AATags); 1225 NewPN->addIncoming(Load, Pred); 1226 } 1227 1228 DEBUG(dbgs() << " speculated to: " << *NewPN << "\n"); 1229 PN.eraseFromParent(); 1230 } 1231 1232 /// Select instructions that use an alloca and are subsequently loaded can be 1233 /// rewritten to load both input pointers and then select between the result, 1234 /// allowing the load of the alloca to be promoted. 1235 /// From this: 1236 /// %P2 = select i1 %cond, i32* %Alloca, i32* %Other 1237 /// %V = load i32* %P2 1238 /// to: 1239 /// %V1 = load i32* %Alloca -> will be mem2reg'd 1240 /// %V2 = load i32* %Other 1241 /// %V = select i1 %cond, i32 %V1, i32 %V2 1242 /// 1243 /// We can do this to a select if its only uses are loads and if the operand 1244 /// to the select can be loaded unconditionally. 1245 static bool isSafeSelectToSpeculate(SelectInst &SI) { 1246 Value *TValue = SI.getTrueValue(); 1247 Value *FValue = SI.getFalseValue(); 1248 const DataLayout &DL = SI.getModule()->getDataLayout(); 1249 1250 for (User *U : SI.users()) { 1251 LoadInst *LI = dyn_cast<LoadInst>(U); 1252 if (!LI || !LI->isSimple()) 1253 return false; 1254 1255 // Both operands to the select need to be dereferenceable, either 1256 // absolutely (e.g. allocas) or at this point because we can see other 1257 // accesses to it. 1258 if (!isSafeToLoadUnconditionally(TValue, LI->getAlignment(), DL, LI)) 1259 return false; 1260 if (!isSafeToLoadUnconditionally(FValue, LI->getAlignment(), DL, LI)) 1261 return false; 1262 } 1263 1264 return true; 1265 } 1266 1267 static void speculateSelectInstLoads(SelectInst &SI) { 1268 DEBUG(dbgs() << " original: " << SI << "\n"); 1269 1270 IRBuilderTy IRB(&SI); 1271 Value *TV = SI.getTrueValue(); 1272 Value *FV = SI.getFalseValue(); 1273 // Replace the loads of the select with a select of two loads. 1274 while (!SI.use_empty()) { 1275 LoadInst *LI = cast<LoadInst>(SI.user_back()); 1276 assert(LI->isSimple() && "We only speculate simple loads"); 1277 1278 IRB.SetInsertPoint(LI); 1279 LoadInst *TL = 1280 IRB.CreateLoad(TV, LI->getName() + ".sroa.speculate.load.true"); 1281 LoadInst *FL = 1282 IRB.CreateLoad(FV, LI->getName() + ".sroa.speculate.load.false"); 1283 NumLoadsSpeculated += 2; 1284 1285 // Transfer alignment and AA info if present. 1286 TL->setAlignment(LI->getAlignment()); 1287 FL->setAlignment(LI->getAlignment()); 1288 1289 AAMDNodes Tags; 1290 LI->getAAMetadata(Tags); 1291 if (Tags) { 1292 TL->setAAMetadata(Tags); 1293 FL->setAAMetadata(Tags); 1294 } 1295 1296 Value *V = IRB.CreateSelect(SI.getCondition(), TL, FL, 1297 LI->getName() + ".sroa.speculated"); 1298 1299 DEBUG(dbgs() << " speculated to: " << *V << "\n"); 1300 LI->replaceAllUsesWith(V); 1301 LI->eraseFromParent(); 1302 } 1303 SI.eraseFromParent(); 1304 } 1305 1306 /// \brief Build a GEP out of a base pointer and indices. 1307 /// 1308 /// This will return the BasePtr if that is valid, or build a new GEP 1309 /// instruction using the IRBuilder if GEP-ing is needed. 
1310 static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr, 1311 SmallVectorImpl<Value *> &Indices, Twine NamePrefix) { 1312 if (Indices.empty()) 1313 return BasePtr; 1314 1315 // A single zero index is a no-op, so check for this and avoid building a GEP 1316 // in that case. 1317 if (Indices.size() == 1 && cast<ConstantInt>(Indices.back())->isZero()) 1318 return BasePtr; 1319 1320 return IRB.CreateInBoundsGEP(nullptr, BasePtr, Indices, 1321 NamePrefix + "sroa_idx"); 1322 } 1323 1324 /// \brief Get a natural GEP off of the BasePtr walking through Ty toward 1325 /// TargetTy without changing the offset of the pointer. 1326 /// 1327 /// This routine assumes we've already established a properly offset GEP with 1328 /// Indices, and arrived at the Ty type. The goal is to continue to GEP with 1329 /// zero-indices down through type layers until we find one the same as 1330 /// TargetTy. If we can't find one with the same type, we at least try to use 1331 /// one with the same size. If none of that works, we just produce the GEP as 1332 /// indicated by Indices to have the correct offset. 1333 static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL, 1334 Value *BasePtr, Type *Ty, Type *TargetTy, 1335 SmallVectorImpl<Value *> &Indices, 1336 Twine NamePrefix) { 1337 if (Ty == TargetTy) 1338 return buildGEP(IRB, BasePtr, Indices, NamePrefix); 1339 1340 // Pointer size to use for the indices. 1341 unsigned PtrSize = DL.getPointerTypeSizeInBits(BasePtr->getType()); 1342 1343 // See if we can descend into a struct and locate a field with the correct 1344 // type. 1345 unsigned NumLayers = 0; 1346 Type *ElementTy = Ty; 1347 do { 1348 if (ElementTy->isPointerTy()) 1349 break; 1350 1351 if (ArrayType *ArrayTy = dyn_cast<ArrayType>(ElementTy)) { 1352 ElementTy = ArrayTy->getElementType(); 1353 Indices.push_back(IRB.getIntN(PtrSize, 0)); 1354 } else if (VectorType *VectorTy = dyn_cast<VectorType>(ElementTy)) { 1355 ElementTy = VectorTy->getElementType(); 1356 Indices.push_back(IRB.getInt32(0)); 1357 } else if (StructType *STy = dyn_cast<StructType>(ElementTy)) { 1358 if (STy->element_begin() == STy->element_end()) 1359 break; // Nothing left to descend into. 1360 ElementTy = *STy->element_begin(); 1361 Indices.push_back(IRB.getInt32(0)); 1362 } else { 1363 break; 1364 } 1365 ++NumLayers; 1366 } while (ElementTy != TargetTy); 1367 if (ElementTy != TargetTy) 1368 Indices.erase(Indices.end() - NumLayers, Indices.end()); 1369 1370 return buildGEP(IRB, BasePtr, Indices, NamePrefix); 1371 } 1372 1373 /// \brief Recursively compute indices for a natural GEP. 1374 /// 1375 /// This is the recursive step for getNaturalGEPWithOffset that walks down the 1376 /// element types adding appropriate indices for the GEP. 1377 static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &DL, 1378 Value *Ptr, Type *Ty, APInt &Offset, 1379 Type *TargetTy, 1380 SmallVectorImpl<Value *> &Indices, 1381 Twine NamePrefix) { 1382 if (Offset == 0) 1383 return getNaturalGEPWithType(IRB, DL, Ptr, Ty, TargetTy, Indices, 1384 NamePrefix); 1385 1386 // We can't recurse through pointer types. 1387 if (Ty->isPointerTy()) 1388 return nullptr; 1389 1390 // We try to analyze GEPs over vectors here, but note that these GEPs are 1391 // extremely poorly defined currently. The long-term goal is to remove GEPing 1392 // over a vector from the IR completely. 
1393 if (VectorType *VecTy = dyn_cast<VectorType>(Ty)) { 1394 unsigned ElementSizeInBits = DL.getTypeSizeInBits(VecTy->getScalarType()); 1395 if (ElementSizeInBits % 8 != 0) { 1396 // GEPs over non-multiple of 8 size vector elements are invalid. 1397 return nullptr; 1398 } 1399 APInt ElementSize(Offset.getBitWidth(), ElementSizeInBits / 8); 1400 APInt NumSkippedElements = Offset.sdiv(ElementSize); 1401 if (NumSkippedElements.ugt(VecTy->getNumElements())) 1402 return nullptr; 1403 Offset -= NumSkippedElements * ElementSize; 1404 Indices.push_back(IRB.getInt(NumSkippedElements)); 1405 return getNaturalGEPRecursively(IRB, DL, Ptr, VecTy->getElementType(), 1406 Offset, TargetTy, Indices, NamePrefix); 1407 } 1408 1409 if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) { 1410 Type *ElementTy = ArrTy->getElementType(); 1411 APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy)); 1412 APInt NumSkippedElements = Offset.sdiv(ElementSize); 1413 if (NumSkippedElements.ugt(ArrTy->getNumElements())) 1414 return nullptr; 1415 1416 Offset -= NumSkippedElements * ElementSize; 1417 Indices.push_back(IRB.getInt(NumSkippedElements)); 1418 return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy, 1419 Indices, NamePrefix); 1420 } 1421 1422 StructType *STy = dyn_cast<StructType>(Ty); 1423 if (!STy) 1424 return nullptr; 1425 1426 const StructLayout *SL = DL.getStructLayout(STy); 1427 uint64_t StructOffset = Offset.getZExtValue(); 1428 if (StructOffset >= SL->getSizeInBytes()) 1429 return nullptr; 1430 unsigned Index = SL->getElementContainingOffset(StructOffset); 1431 Offset -= APInt(Offset.getBitWidth(), SL->getElementOffset(Index)); 1432 Type *ElementTy = STy->getElementType(Index); 1433 if (Offset.uge(DL.getTypeAllocSize(ElementTy))) 1434 return nullptr; // The offset points into alignment padding. 1435 1436 Indices.push_back(IRB.getInt32(Index)); 1437 return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy, 1438 Indices, NamePrefix); 1439 } 1440 1441 /// \brief Get a natural GEP from a base pointer to a particular offset and 1442 /// resulting in a particular type. 1443 /// 1444 /// The goal is to produce a "natural" looking GEP that works with the existing 1445 /// composite types to arrive at the appropriate offset and element type for 1446 /// a pointer. TargetTy is the element type the returned GEP should point-to if 1447 /// possible. We recurse by decreasing Offset, adding the appropriate index to 1448 /// Indices, and setting Ty to the result subtype. 1449 /// 1450 /// If no natural GEP can be constructed, this function returns null. 1451 static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &DL, 1452 Value *Ptr, APInt Offset, Type *TargetTy, 1453 SmallVectorImpl<Value *> &Indices, 1454 Twine NamePrefix) { 1455 PointerType *Ty = cast<PointerType>(Ptr->getType()); 1456 1457 // Don't consider any GEPs through an i8* as natural unless the TargetTy is 1458 // an i8. 1459 if (Ty == IRB.getInt8PtrTy(Ty->getAddressSpace()) && TargetTy->isIntegerTy(8)) 1460 return nullptr; 1461 1462 Type *ElementTy = Ty->getElementType(); 1463 if (!ElementTy->isSized()) 1464 return nullptr; // We can't GEP through an unsized element. 1465 APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy)); 1466 if (ElementSize == 0) 1467 return nullptr; // Zero-length arrays can't help us build a natural GEP. 
1468 APInt NumSkippedElements = Offset.sdiv(ElementSize);
1469
1470 Offset -= NumSkippedElements * ElementSize;
1471 Indices.push_back(IRB.getInt(NumSkippedElements));
1472 return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
1473 Indices, NamePrefix);
1474 }
1475
1476 /// \brief Compute an adjusted pointer from Ptr by Offset bytes where the
1477 /// resulting pointer has PointerTy.
1478 ///
1479 /// This tries very hard to compute a "natural" GEP which arrives at the offset
1480 /// and produces the pointer type desired. Where it cannot, it will try to use
1481 /// the natural GEP to arrive at the offset and bitcast to the type. Where that
1482 /// fails, it will try to use an existing i8* and GEP to the byte offset and
1483 /// bitcast to the type.
1484 ///
1485 /// The strategy for finding the more natural GEPs is to peel off layers of the
1486 /// pointer, walking back through bit casts and GEPs, searching for a base
1487 /// pointer from which we can compute a natural GEP with the desired
1488 /// properties. The algorithm tries to fold as many constant indices into
1489 /// a single GEP as possible, thus making each GEP more independent of the
1490 /// surrounding code.
1491 static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr,
1492 APInt Offset, Type *PointerTy, Twine NamePrefix) {
1493 // Even though we don't look through PHI nodes, we could be called on an
1494 // instruction in an unreachable block, which may be on a cycle.
1495 SmallPtrSet<Value *, 4> Visited;
1496 Visited.insert(Ptr);
1497 SmallVector<Value *, 4> Indices;
1498
1499 // We may end up computing an offset pointer that has the wrong type. If we
1500 // never are able to compute one directly that has the correct type, we'll
1501 // fall back to it, so keep it and the base it was computed from around here.
1502 Value *OffsetPtr = nullptr;
1503 Value *OffsetBasePtr;
1504
1505 // Remember any i8 pointer we come across to re-use if we need to do a raw
1506 // byte offset.
1507 Value *Int8Ptr = nullptr;
1508 APInt Int8PtrOffset(Offset.getBitWidth(), 0);
1509
1510 Type *TargetTy = PointerTy->getPointerElementType();
1511
1512 do {
1513 // First fold any existing GEPs into the offset.
1514 while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
1515 APInt GEPOffset(Offset.getBitWidth(), 0);
1516 if (!GEP->accumulateConstantOffset(DL, GEPOffset))
1517 break;
1518 Offset += GEPOffset;
1519 Ptr = GEP->getPointerOperand();
1520 if (!Visited.insert(Ptr).second)
1521 break;
1522 }
1523
1524 // See if we can perform a natural GEP here.
1525 Indices.clear();
1526 if (Value *P = getNaturalGEPWithOffset(IRB, DL, Ptr, Offset, TargetTy,
1527 Indices, NamePrefix)) {
1528 // If we have a new natural pointer at the offset, clear out any old
1529 // offset pointer we computed. Unless it is the base pointer or
1530 // a non-instruction, we built a GEP we don't need. Zap it.
1531 if (OffsetPtr && OffsetPtr != OffsetBasePtr)
1532 if (Instruction *I = dyn_cast<Instruction>(OffsetPtr)) {
1533 assert(I->use_empty() && "Built a GEP with uses somehow!");
1534 I->eraseFromParent();
1535 }
1536 OffsetPtr = P;
1537 OffsetBasePtr = Ptr;
1538 // If we also found a pointer of the right type, we're done.
1539 if (P->getType() == PointerTy)
1540 return P;
1541 }
1542
1543 // Stash this pointer if we've found an i8*.
1544 if (Ptr->getType()->isIntegerTy(8)) {
1545 Int8Ptr = Ptr;
1546 Int8PtrOffset = Offset;
1547 }
1548
1549 // Peel off a layer of the pointer and update the offset appropriately.
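// For instance (hypothetical IR): if Ptr is "%p = bitcast [4 x i32]* %a to
// i8*", we step back to %a and retry the natural GEP search from there; the
// same applies to a non-interposable global alias, where we continue from
// its aliasee.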
1550 if (Operator::getOpcode(Ptr) == Instruction::BitCast) { 1551 Ptr = cast<Operator>(Ptr)->getOperand(0); 1552 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) { 1553 if (GA->isInterposable()) 1554 break; 1555 Ptr = GA->getAliasee(); 1556 } else { 1557 break; 1558 } 1559 assert(Ptr->getType()->isPointerTy() && "Unexpected operand type!"); 1560 } while (Visited.insert(Ptr).second); 1561 1562 if (!OffsetPtr) { 1563 if (!Int8Ptr) { 1564 Int8Ptr = IRB.CreateBitCast( 1565 Ptr, IRB.getInt8PtrTy(PointerTy->getPointerAddressSpace()), 1566 NamePrefix + "sroa_raw_cast"); 1567 Int8PtrOffset = Offset; 1568 } 1569 1570 OffsetPtr = Int8PtrOffset == 0 1571 ? Int8Ptr 1572 : IRB.CreateInBoundsGEP(IRB.getInt8Ty(), Int8Ptr, 1573 IRB.getInt(Int8PtrOffset), 1574 NamePrefix + "sroa_raw_idx"); 1575 } 1576 Ptr = OffsetPtr; 1577 1578 // On the off chance we were targeting i8*, guard the bitcast here. 1579 if (Ptr->getType() != PointerTy) 1580 Ptr = IRB.CreateBitCast(Ptr, PointerTy, NamePrefix + "sroa_cast"); 1581 1582 return Ptr; 1583 } 1584 1585 /// \brief Compute the adjusted alignment for a load or store from an offset. 1586 static unsigned getAdjustedAlignment(Instruction *I, uint64_t Offset, 1587 const DataLayout &DL) { 1588 unsigned Alignment; 1589 Type *Ty; 1590 if (auto *LI = dyn_cast<LoadInst>(I)) { 1591 Alignment = LI->getAlignment(); 1592 Ty = LI->getType(); 1593 } else if (auto *SI = dyn_cast<StoreInst>(I)) { 1594 Alignment = SI->getAlignment(); 1595 Ty = SI->getValueOperand()->getType(); 1596 } else { 1597 llvm_unreachable("Only loads and stores are allowed!"); 1598 } 1599 1600 if (!Alignment) 1601 Alignment = DL.getABITypeAlignment(Ty); 1602 1603 return MinAlign(Alignment, Offset); 1604 } 1605 1606 /// \brief Test whether we can convert a value from the old to the new type. 1607 /// 1608 /// This predicate should be used to guard calls to convertValue in order to 1609 /// ensure that we only try to convert viable values. The strategy is that we 1610 /// will peel off single element struct and array wrappings to get to an 1611 /// underlying value, and convert that value. 1612 static bool canConvertValue(const DataLayout &DL, Type *OldTy, Type *NewTy) { 1613 if (OldTy == NewTy) 1614 return true; 1615 1616 // For integer types, we can't handle any bit-width differences. This would 1617 // break both vector conversions with extension and introduce endianness 1618 // issues when in conjunction with loads and stores. 1619 if (isa<IntegerType>(OldTy) && isa<IntegerType>(NewTy)) { 1620 assert(cast<IntegerType>(OldTy)->getBitWidth() != 1621 cast<IntegerType>(NewTy)->getBitWidth() && 1622 "We can't have the same bitwidth for different int types"); 1623 return false; 1624 } 1625 1626 if (DL.getTypeSizeInBits(NewTy) != DL.getTypeSizeInBits(OldTy)) 1627 return false; 1628 if (!NewTy->isSingleValueType() || !OldTy->isSingleValueType()) 1629 return false; 1630 1631 // We can convert pointers to integers and vice-versa. Same for vectors 1632 // of pointers and integers. 1633 OldTy = OldTy->getScalarType(); 1634 NewTy = NewTy->getScalarType(); 1635 if (NewTy->isPointerTy() || OldTy->isPointerTy()) { 1636 if (NewTy->isPointerTy() && OldTy->isPointerTy()) { 1637 return cast<PointerType>(NewTy)->getPointerAddressSpace() == 1638 cast<PointerType>(OldTy)->getPointerAddressSpace(); 1639 } 1640 1641 // We can convert integers to integral pointers, but not to non-integral 1642 // pointers. 
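// (Non-integral address spaces are the ones named in the DataLayout's "ni"
// specification; on a hypothetical target declaring "ni:1", a pointer in
// addrspace(1) has no stable integer representation, so integer round-trips
// through it are rejected here.)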
1643 if (OldTy->isIntegerTy())
1644 return !DL.isNonIntegralPointerType(NewTy);
1645
1646 // We can convert integral pointers to integers, but non-integral pointers
1647 // need to remain pointers.
1648 if (!DL.isNonIntegralPointerType(OldTy))
1649 return NewTy->isIntegerTy();
1650
1651 return false;
1652 }
1653
1654 return true;
1655 }
1656
1657 /// \brief Generic routine to convert an SSA value to a value of a different
1658 /// type.
1659 ///
1660 /// This will try various different casting techniques, such as bitcasts,
1661 /// inttoptr, and ptrtoint casts. Use the \c canConvertValue predicate to test
1662 /// two types for viability with this routine.
1663 static Value *convertValue(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
1664 Type *NewTy) {
1665 Type *OldTy = V->getType();
1666 assert(canConvertValue(DL, OldTy, NewTy) && "Value not convertible to type");
1667
1668 if (OldTy == NewTy)
1669 return V;
1670
1671 assert(!(isa<IntegerType>(OldTy) && isa<IntegerType>(NewTy)) &&
1672 "Integer types must be the exact same to convert.");
1673
1674 // See if we need inttoptr for this type pair. A cast involving both scalars
1675 // and vectors requires an additional bitcast.
1676 if (OldTy->getScalarType()->isIntegerTy() &&
1677 NewTy->getScalarType()->isPointerTy()) {
1678 // Expand <2 x i32> to i8* --> <2 x i32> to i64 to i8*
1679 if (OldTy->isVectorTy() && !NewTy->isVectorTy())
1680 return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)),
1681 NewTy);
1682
1683 // Expand i128 to <2 x i8*> --> i128 to <2 x i64> to <2 x i8*>
1684 if (!OldTy->isVectorTy() && NewTy->isVectorTy())
1685 return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)),
1686 NewTy);
1687
1688 return IRB.CreateIntToPtr(V, NewTy);
1689 }
1690
1691 // See if we need ptrtoint for this type pair. A cast involving both scalars
1692 // and vectors requires an additional bitcast.
1693 if (OldTy->getScalarType()->isPointerTy() &&
1694 NewTy->getScalarType()->isIntegerTy()) {
1695 // Expand <2 x i8*> to i128 --> <2 x i8*> to <2 x i64> to i128
1696 if (OldTy->isVectorTy() && !NewTy->isVectorTy())
1697 return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)),
1698 NewTy);
1699
1700 // Expand i8* to <2 x i32> --> i8* to i64 to <2 x i32>
1701 if (!OldTy->isVectorTy() && NewTy->isVectorTy())
1702 return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)),
1703 NewTy);
1704
1705 return IRB.CreatePtrToInt(V, NewTy);
1706 }
1707
1708 return IRB.CreateBitCast(V, NewTy);
1709 }
1710
1711 /// \brief Test whether the given slice use can be promoted to a vector.
1712 ///
1713 /// This function is called to test each entry in a partition which is slated
1714 /// for a single slice.
1715 static bool isVectorPromotionViableForSlice(Partition &P, const Slice &S,
1716 VectorType *Ty,
1717 uint64_t ElementSize,
1718 const DataLayout &DL) {
1719 // First validate the slice offsets.
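// (Hypothetical example: with a 16-byte <4 x i32> partition and a slice
// covering bytes 4..12, BeginIndex is 1 and EndIndex is 3, so the slice maps
// exactly onto elements 1 and 2; a slice covering bytes 4..10 fails the
// EndIndex check below because 10 is not a multiple of the element size.)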
1720 uint64_t BeginOffset = 1721 std::max(S.beginOffset(), P.beginOffset()) - P.beginOffset(); 1722 uint64_t BeginIndex = BeginOffset / ElementSize; 1723 if (BeginIndex * ElementSize != BeginOffset || 1724 BeginIndex >= Ty->getNumElements()) 1725 return false; 1726 uint64_t EndOffset = 1727 std::min(S.endOffset(), P.endOffset()) - P.beginOffset(); 1728 uint64_t EndIndex = EndOffset / ElementSize; 1729 if (EndIndex * ElementSize != EndOffset || EndIndex > Ty->getNumElements()) 1730 return false; 1731 1732 assert(EndIndex > BeginIndex && "Empty vector!"); 1733 uint64_t NumElements = EndIndex - BeginIndex; 1734 Type *SliceTy = (NumElements == 1) 1735 ? Ty->getElementType() 1736 : VectorType::get(Ty->getElementType(), NumElements); 1737 1738 Type *SplitIntTy = 1739 Type::getIntNTy(Ty->getContext(), NumElements * ElementSize * 8); 1740 1741 Use *U = S.getUse(); 1742 1743 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) { 1744 if (MI->isVolatile()) 1745 return false; 1746 if (!S.isSplittable()) 1747 return false; // Skip any unsplittable intrinsics. 1748 } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) { 1749 if (II->getIntrinsicID() != Intrinsic::lifetime_start && 1750 II->getIntrinsicID() != Intrinsic::lifetime_end) 1751 return false; 1752 } else if (U->get()->getType()->getPointerElementType()->isStructTy()) { 1753 // Disable vector promotion when there are loads or stores of an FCA. 1754 return false; 1755 } else if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) { 1756 if (LI->isVolatile()) 1757 return false; 1758 Type *LTy = LI->getType(); 1759 if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) { 1760 assert(LTy->isIntegerTy()); 1761 LTy = SplitIntTy; 1762 } 1763 if (!canConvertValue(DL, SliceTy, LTy)) 1764 return false; 1765 } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) { 1766 if (SI->isVolatile()) 1767 return false; 1768 Type *STy = SI->getValueOperand()->getType(); 1769 if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) { 1770 assert(STy->isIntegerTy()); 1771 STy = SplitIntTy; 1772 } 1773 if (!canConvertValue(DL, STy, SliceTy)) 1774 return false; 1775 } else { 1776 return false; 1777 } 1778 1779 return true; 1780 } 1781 1782 /// \brief Test whether the given alloca partitioning and range of slices can be 1783 /// promoted to a vector. 1784 /// 1785 /// This is a quick test to check whether we can rewrite a particular alloca 1786 /// partition (and its newly formed alloca) into a vector alloca with only 1787 /// whole-vector loads and stores such that it could be promoted to a vector 1788 /// SSA value. We only can ensure this for a limited set of operations, and we 1789 /// don't want to do the rewrites unless we are confident that the result will 1790 /// be promotable, so we have an early test here. 1791 static VectorType *isVectorPromotionViable(Partition &P, const DataLayout &DL) { 1792 // Collect the candidate types for vector-based promotion. Also track whether 1793 // we have different element types. 1794 SmallVector<VectorType *, 4> CandidateTys; 1795 Type *CommonEltTy = nullptr; 1796 bool HaveCommonEltTy = true; 1797 auto CheckCandidateType = [&](Type *Ty) { 1798 if (auto *VTy = dyn_cast<VectorType>(Ty)) { 1799 CandidateTys.push_back(VTy); 1800 if (!CommonEltTy) 1801 CommonEltTy = VTy->getElementType(); 1802 else if (CommonEltTy != VTy->getElementType()) 1803 HaveCommonEltTy = false; 1804 } 1805 }; 1806 // Consider any loads or stores that are the exact size of the slice. 
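// (For instance, hypothetically, a "load <2 x float>, <2 x float>* %p" whose
// slice spans exactly the 8 bytes of the partition makes <2 x float> a
// candidate type; a load covering only part of the partition is ignored
// here.)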
1807 for (const Slice &S : P) 1808 if (S.beginOffset() == P.beginOffset() && 1809 S.endOffset() == P.endOffset()) { 1810 if (auto *LI = dyn_cast<LoadInst>(S.getUse()->getUser())) 1811 CheckCandidateType(LI->getType()); 1812 else if (auto *SI = dyn_cast<StoreInst>(S.getUse()->getUser())) 1813 CheckCandidateType(SI->getValueOperand()->getType()); 1814 } 1815 1816 // If we didn't find a vector type, nothing to do here. 1817 if (CandidateTys.empty()) 1818 return nullptr; 1819 1820 // Remove non-integer vector types if we had multiple common element types. 1821 // FIXME: It'd be nice to replace them with integer vector types, but we can't 1822 // do that until all the backends are known to produce good code for all 1823 // integer vector types. 1824 if (!HaveCommonEltTy) { 1825 CandidateTys.erase(remove_if(CandidateTys, 1826 [](VectorType *VTy) { 1827 return !VTy->getElementType()->isIntegerTy(); 1828 }), 1829 CandidateTys.end()); 1830 1831 // If there were no integer vector types, give up. 1832 if (CandidateTys.empty()) 1833 return nullptr; 1834 1835 // Rank the remaining candidate vector types. This is easy because we know 1836 // they're all integer vectors. We sort by ascending number of elements. 1837 auto RankVectorTypes = [&DL](VectorType *RHSTy, VectorType *LHSTy) { 1838 (void)DL; 1839 assert(DL.getTypeSizeInBits(RHSTy) == DL.getTypeSizeInBits(LHSTy) && 1840 "Cannot have vector types of different sizes!"); 1841 assert(RHSTy->getElementType()->isIntegerTy() && 1842 "All non-integer types eliminated!"); 1843 assert(LHSTy->getElementType()->isIntegerTy() && 1844 "All non-integer types eliminated!"); 1845 return RHSTy->getNumElements() < LHSTy->getNumElements(); 1846 }; 1847 std::sort(CandidateTys.begin(), CandidateTys.end(), RankVectorTypes); 1848 CandidateTys.erase( 1849 std::unique(CandidateTys.begin(), CandidateTys.end(), RankVectorTypes), 1850 CandidateTys.end()); 1851 } else { 1852 // The only way to have the same element type in every vector type is to 1853 // have the same vector type. Check that and remove all but one. 1854 #ifndef NDEBUG 1855 for (VectorType *VTy : CandidateTys) { 1856 assert(VTy->getElementType() == CommonEltTy && 1857 "Unaccounted for element type!"); 1858 assert(VTy == CandidateTys[0] && 1859 "Different vector types with the same element type!"); 1860 } 1861 #endif 1862 CandidateTys.resize(1); 1863 } 1864 1865 // Try each vector type, and return the one which works. 1866 auto CheckVectorTypeForPromotion = [&](VectorType *VTy) { 1867 uint64_t ElementSize = DL.getTypeSizeInBits(VTy->getElementType()); 1868 1869 // While the definition of LLVM vectors is bitpacked, we don't support sizes 1870 // that aren't byte sized. 1871 if (ElementSize % 8) 1872 return false; 1873 assert((DL.getTypeSizeInBits(VTy) % 8) == 0 && 1874 "vector size not a multiple of element size?"); 1875 ElementSize /= 8; 1876 1877 for (const Slice &S : P) 1878 if (!isVectorPromotionViableForSlice(P, S, VTy, ElementSize, DL)) 1879 return false; 1880 1881 for (const Slice *S : P.splitSliceTails()) 1882 if (!isVectorPromotionViableForSlice(P, *S, VTy, ElementSize, DL)) 1883 return false; 1884 1885 return true; 1886 }; 1887 for (VectorType *VTy : CandidateTys) 1888 if (CheckVectorTypeForPromotion(VTy)) 1889 return VTy; 1890 1891 return nullptr; 1892 } 1893 1894 /// \brief Test whether a slice of an alloca is valid for integer widening. 1895 /// 1896 /// This implements the necessary checking for the \c isIntegerWideningViable 1897 /// test below on a single slice of the alloca. 
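/// For example (hypothetical): for an i64-sized alloca, an i32 load at byte
/// offset 4 is acceptable here and later becomes a shift-and-truncate of the
/// wide value, while any access whose store size extends past the end of the
/// alloca causes the slice to be rejected.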
1898 static bool isIntegerWideningViableForSlice(const Slice &S, 1899 uint64_t AllocBeginOffset, 1900 Type *AllocaTy, 1901 const DataLayout &DL, 1902 bool &WholeAllocaOp) { 1903 uint64_t Size = DL.getTypeStoreSize(AllocaTy); 1904 1905 uint64_t RelBegin = S.beginOffset() - AllocBeginOffset; 1906 uint64_t RelEnd = S.endOffset() - AllocBeginOffset; 1907 1908 // We can't reasonably handle cases where the load or store extends past 1909 // the end of the alloca's type and into its padding. 1910 if (RelEnd > Size) 1911 return false; 1912 1913 Use *U = S.getUse(); 1914 1915 if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) { 1916 if (LI->isVolatile()) 1917 return false; 1918 // We can't handle loads that extend past the allocated memory. 1919 if (DL.getTypeStoreSize(LI->getType()) > Size) 1920 return false; 1921 // Note that we don't count vector loads or stores as whole-alloca 1922 // operations which enable integer widening because we would prefer to use 1923 // vector widening instead. 1924 if (!isa<VectorType>(LI->getType()) && RelBegin == 0 && RelEnd == Size) 1925 WholeAllocaOp = true; 1926 if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType())) { 1927 if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy)) 1928 return false; 1929 } else if (RelBegin != 0 || RelEnd != Size || 1930 !canConvertValue(DL, AllocaTy, LI->getType())) { 1931 // Non-integer loads need to be convertible from the alloca type so that 1932 // they are promotable. 1933 return false; 1934 } 1935 } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) { 1936 Type *ValueTy = SI->getValueOperand()->getType(); 1937 if (SI->isVolatile()) 1938 return false; 1939 // We can't handle stores that extend past the allocated memory. 1940 if (DL.getTypeStoreSize(ValueTy) > Size) 1941 return false; 1942 // Note that we don't count vector loads or stores as whole-alloca 1943 // operations which enable integer widening because we would prefer to use 1944 // vector widening instead. 1945 if (!isa<VectorType>(ValueTy) && RelBegin == 0 && RelEnd == Size) 1946 WholeAllocaOp = true; 1947 if (IntegerType *ITy = dyn_cast<IntegerType>(ValueTy)) { 1948 if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy)) 1949 return false; 1950 } else if (RelBegin != 0 || RelEnd != Size || 1951 !canConvertValue(DL, ValueTy, AllocaTy)) { 1952 // Non-integer stores need to be convertible to the alloca type so that 1953 // they are promotable. 1954 return false; 1955 } 1956 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) { 1957 if (MI->isVolatile() || !isa<Constant>(MI->getLength())) 1958 return false; 1959 if (!S.isSplittable()) 1960 return false; // Skip any unsplittable intrinsics. 1961 } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) { 1962 if (II->getIntrinsicID() != Intrinsic::lifetime_start && 1963 II->getIntrinsicID() != Intrinsic::lifetime_end) 1964 return false; 1965 } else { 1966 return false; 1967 } 1968 1969 return true; 1970 } 1971 1972 /// \brief Test whether the given alloca partition's integer operations can be 1973 /// widened to promotable ones. 1974 /// 1975 /// This is a quick test to check whether we can rewrite the integer loads and 1976 /// stores to a particular alloca into wider loads and stores and be able to 1977 /// promote the resulting alloca. 1978 static bool isIntegerWideningViable(Partition &P, Type *AllocaTy, 1979 const DataLayout &DL) { 1980 uint64_t SizeInBits = DL.getTypeSizeInBits(AllocaTy); 1981 // Don't create integer types larger than the maximum bitwidth. 
1982 if (SizeInBits > IntegerType::MAX_INT_BITS) 1983 return false; 1984 1985 // Don't try to handle allocas with bit-padding. 1986 if (SizeInBits != DL.getTypeStoreSizeInBits(AllocaTy)) 1987 return false; 1988 1989 // We need to ensure that an integer type with the appropriate bitwidth can 1990 // be converted to the alloca type, whatever that is. We don't want to force 1991 // the alloca itself to have an integer type if there is a more suitable one. 1992 Type *IntTy = Type::getIntNTy(AllocaTy->getContext(), SizeInBits); 1993 if (!canConvertValue(DL, AllocaTy, IntTy) || 1994 !canConvertValue(DL, IntTy, AllocaTy)) 1995 return false; 1996 1997 // While examining uses, we ensure that the alloca has a covering load or 1998 // store. We don't want to widen the integer operations only to fail to 1999 // promote due to some other unsplittable entry (which we may make splittable 2000 // later). However, if there are only splittable uses, go ahead and assume 2001 // that we cover the alloca. 2002 // FIXME: We shouldn't consider split slices that happen to start in the 2003 // partition here... 2004 bool WholeAllocaOp = 2005 P.begin() != P.end() ? false : DL.isLegalInteger(SizeInBits); 2006 2007 for (const Slice &S : P) 2008 if (!isIntegerWideningViableForSlice(S, P.beginOffset(), AllocaTy, DL, 2009 WholeAllocaOp)) 2010 return false; 2011 2012 for (const Slice *S : P.splitSliceTails()) 2013 if (!isIntegerWideningViableForSlice(*S, P.beginOffset(), AllocaTy, DL, 2014 WholeAllocaOp)) 2015 return false; 2016 2017 return WholeAllocaOp; 2018 } 2019 2020 static Value *extractInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *V, 2021 IntegerType *Ty, uint64_t Offset, 2022 const Twine &Name) { 2023 DEBUG(dbgs() << " start: " << *V << "\n"); 2024 IntegerType *IntTy = cast<IntegerType>(V->getType()); 2025 assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) && 2026 "Element extends past full value"); 2027 uint64_t ShAmt = 8 * Offset; 2028 if (DL.isBigEndian()) 2029 ShAmt = 8 * (DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset); 2030 if (ShAmt) { 2031 V = IRB.CreateLShr(V, ShAmt, Name + ".shift"); 2032 DEBUG(dbgs() << " shifted: " << *V << "\n"); 2033 } 2034 assert(Ty->getBitWidth() <= IntTy->getBitWidth() && 2035 "Cannot extract to a larger integer!"); 2036 if (Ty != IntTy) { 2037 V = IRB.CreateTrunc(V, Ty, Name + ".trunc"); 2038 DEBUG(dbgs() << " trunced: " << *V << "\n"); 2039 } 2040 return V; 2041 } 2042 2043 static Value *insertInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *Old, 2044 Value *V, uint64_t Offset, const Twine &Name) { 2045 IntegerType *IntTy = cast<IntegerType>(Old->getType()); 2046 IntegerType *Ty = cast<IntegerType>(V->getType()); 2047 assert(Ty->getBitWidth() <= IntTy->getBitWidth() && 2048 "Cannot insert a larger integer!"); 2049 DEBUG(dbgs() << " start: " << *V << "\n"); 2050 if (Ty != IntTy) { 2051 V = IRB.CreateZExt(V, IntTy, Name + ".ext"); 2052 DEBUG(dbgs() << " extended: " << *V << "\n"); 2053 } 2054 assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) && 2055 "Element store outside of alloca store"); 2056 uint64_t ShAmt = 8 * Offset; 2057 if (DL.isBigEndian()) 2058 ShAmt = 8 * (DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset); 2059 if (ShAmt) { 2060 V = IRB.CreateShl(V, ShAmt, Name + ".shift"); 2061 DEBUG(dbgs() << " shifted: " << *V << "\n"); 2062 } 2063 2064 if (ShAmt || Ty->getBitWidth() < IntTy->getBitWidth()) { 2065 APInt Mask = ~Ty->getMask().zext(IntTy->getBitWidth()).shl(ShAmt); 2066 Old = 
IRB.CreateAnd(Old, Mask, Name + ".mask");
2067 DEBUG(dbgs() << " masked: " << *Old << "\n");
2068 V = IRB.CreateOr(Old, V, Name + ".insert");
2069 DEBUG(dbgs() << " inserted: " << *V << "\n");
2070 }
2071 return V;
2072 }
2073
2074 static Value *extractVector(IRBuilderTy &IRB, Value *V, unsigned BeginIndex,
2075 unsigned EndIndex, const Twine &Name) {
2076 VectorType *VecTy = cast<VectorType>(V->getType());
2077 unsigned NumElements = EndIndex - BeginIndex;
2078 assert(NumElements <= VecTy->getNumElements() && "Too many elements!");
2079
2080 if (NumElements == VecTy->getNumElements())
2081 return V;
2082
2083 if (NumElements == 1) {
2084 V = IRB.CreateExtractElement(V, IRB.getInt32(BeginIndex),
2085 Name + ".extract");
2086 DEBUG(dbgs() << " extract: " << *V << "\n");
2087 return V;
2088 }
2089
2090 SmallVector<Constant *, 8> Mask;
2091 Mask.reserve(NumElements);
2092 for (unsigned i = BeginIndex; i != EndIndex; ++i)
2093 Mask.push_back(IRB.getInt32(i));
2094 V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()),
2095 ConstantVector::get(Mask), Name + ".extract");
2096 DEBUG(dbgs() << " shuffle: " << *V << "\n");
2097 return V;
2098 }
2099
2100 static Value *insertVector(IRBuilderTy &IRB, Value *Old, Value *V,
2101 unsigned BeginIndex, const Twine &Name) {
2102 VectorType *VecTy = cast<VectorType>(Old->getType());
2103 assert(VecTy && "Can only insert a vector into a vector");
2104
2105 VectorType *Ty = dyn_cast<VectorType>(V->getType());
2106 if (!Ty) {
2107 // Single element to insert.
2108 V = IRB.CreateInsertElement(Old, V, IRB.getInt32(BeginIndex),
2109 Name + ".insert");
2110 DEBUG(dbgs() << " insert: " << *V << "\n");
2111 return V;
2112 }
2113
2114 assert(Ty->getNumElements() <= VecTy->getNumElements() &&
2115 "Too many elements!");
2116 if (Ty->getNumElements() == VecTy->getNumElements()) {
2117 assert(V->getType() == VecTy && "Vector type mismatch");
2118 return V;
2119 }
2120 unsigned EndIndex = BeginIndex + Ty->getNumElements();
2121
2122 // When inserting a smaller vector into the larger to store, we first
2123 // use a shuffle vector to widen it with undef elements, and then
2124 // a second shuffle vector to select between the loaded vector and the
2125 // incoming vector.
2126 SmallVector<Constant *, 8> Mask;
2127 Mask.reserve(VecTy->getNumElements());
2128 for (unsigned i = 0; i != VecTy->getNumElements(); ++i)
2129 if (i >= BeginIndex && i < EndIndex)
2130 Mask.push_back(IRB.getInt32(i - BeginIndex));
2131 else
2132 Mask.push_back(UndefValue::get(IRB.getInt32Ty()));
2133 V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()),
2134 ConstantVector::get(Mask), Name + ".expand");
2135 DEBUG(dbgs() << " shuffle: " << *V << "\n");
2136
2137 Mask.clear();
2138 for (unsigned i = 0; i != VecTy->getNumElements(); ++i)
2139 Mask.push_back(IRB.getInt1(i >= BeginIndex && i < EndIndex));
2140
2141 V = IRB.CreateSelect(ConstantVector::get(Mask), V, Old, Name + "blend");
2142
2143 DEBUG(dbgs() << " blend: " << *V << "\n");
2144 return V;
2145 }
2146
2147 /// \brief Visitor to rewrite instructions using a particular slice of an alloca
2148 /// to use a new alloca.
2149 ///
2150 /// Also implements the rewriting to vector-based accesses when the partition
2151 /// passes the isVectorPromotionViable predicate. Most of the rewriting logic
2152 /// lives here.
2153 class llvm::sroa::AllocaSliceRewriter
2154 : public InstVisitor<AllocaSliceRewriter, bool> {
2155 // Befriend the base class so it can delegate to private visit methods.
2156 friend class llvm::InstVisitor<AllocaSliceRewriter, bool>; 2157 typedef llvm::InstVisitor<AllocaSliceRewriter, bool> Base; 2158 2159 const DataLayout &DL; 2160 AllocaSlices &AS; 2161 SROA &Pass; 2162 AllocaInst &OldAI, &NewAI; 2163 const uint64_t NewAllocaBeginOffset, NewAllocaEndOffset; 2164 Type *NewAllocaTy; 2165 2166 // This is a convenience and flag variable that will be null unless the new 2167 // alloca's integer operations should be widened to this integer type due to 2168 // passing isIntegerWideningViable above. If it is non-null, the desired 2169 // integer type will be stored here for easy access during rewriting. 2170 IntegerType *IntTy; 2171 2172 // If we are rewriting an alloca partition which can be written as pure 2173 // vector operations, we stash extra information here. When VecTy is 2174 // non-null, we have some strict guarantees about the rewritten alloca: 2175 // - The new alloca is exactly the size of the vector type here. 2176 // - The accesses all either map to the entire vector or to a single 2177 // element. 2178 // - The set of accessing instructions is only one of those handled above 2179 // in isVectorPromotionViable. Generally these are the same access kinds 2180 // which are promotable via mem2reg. 2181 VectorType *VecTy; 2182 Type *ElementTy; 2183 uint64_t ElementSize; 2184 2185 // The original offset of the slice currently being rewritten relative to 2186 // the original alloca. 2187 uint64_t BeginOffset, EndOffset; 2188 // The new offsets of the slice currently being rewritten relative to the 2189 // original alloca. 2190 uint64_t NewBeginOffset, NewEndOffset; 2191 2192 uint64_t SliceSize; 2193 bool IsSplittable; 2194 bool IsSplit; 2195 Use *OldUse; 2196 Instruction *OldPtr; 2197 2198 // Track post-rewrite users which are PHI nodes and Selects. 2199 SmallSetVector<PHINode *, 8> &PHIUsers; 2200 SmallSetVector<SelectInst *, 8> &SelectUsers; 2201 2202 // Utility IR builder, whose name prefix is setup for each visited use, and 2203 // the insertion point is set to point to the user. 2204 IRBuilderTy IRB; 2205 2206 public: 2207 AllocaSliceRewriter(const DataLayout &DL, AllocaSlices &AS, SROA &Pass, 2208 AllocaInst &OldAI, AllocaInst &NewAI, 2209 uint64_t NewAllocaBeginOffset, 2210 uint64_t NewAllocaEndOffset, bool IsIntegerPromotable, 2211 VectorType *PromotableVecTy, 2212 SmallSetVector<PHINode *, 8> &PHIUsers, 2213 SmallSetVector<SelectInst *, 8> &SelectUsers) 2214 : DL(DL), AS(AS), Pass(Pass), OldAI(OldAI), NewAI(NewAI), 2215 NewAllocaBeginOffset(NewAllocaBeginOffset), 2216 NewAllocaEndOffset(NewAllocaEndOffset), 2217 NewAllocaTy(NewAI.getAllocatedType()), 2218 IntTy(IsIntegerPromotable 2219 ? Type::getIntNTy( 2220 NewAI.getContext(), 2221 DL.getTypeSizeInBits(NewAI.getAllocatedType())) 2222 : nullptr), 2223 VecTy(PromotableVecTy), 2224 ElementTy(VecTy ? VecTy->getElementType() : nullptr), 2225 ElementSize(VecTy ? 
DL.getTypeSizeInBits(ElementTy) / 8 : 0), 2226 BeginOffset(), EndOffset(), IsSplittable(), IsSplit(), OldUse(), 2227 OldPtr(), PHIUsers(PHIUsers), SelectUsers(SelectUsers), 2228 IRB(NewAI.getContext(), ConstantFolder()) { 2229 if (VecTy) { 2230 assert((DL.getTypeSizeInBits(ElementTy) % 8) == 0 && 2231 "Only multiple-of-8 sized vector elements are viable"); 2232 ++NumVectorized; 2233 } 2234 assert((!IntTy && !VecTy) || (IntTy && !VecTy) || (!IntTy && VecTy)); 2235 } 2236 2237 bool visit(AllocaSlices::const_iterator I) { 2238 bool CanSROA = true; 2239 BeginOffset = I->beginOffset(); 2240 EndOffset = I->endOffset(); 2241 IsSplittable = I->isSplittable(); 2242 IsSplit = 2243 BeginOffset < NewAllocaBeginOffset || EndOffset > NewAllocaEndOffset; 2244 DEBUG(dbgs() << " rewriting " << (IsSplit ? "split " : "")); 2245 DEBUG(AS.printSlice(dbgs(), I, "")); 2246 DEBUG(dbgs() << "\n"); 2247 2248 // Compute the intersecting offset range. 2249 assert(BeginOffset < NewAllocaEndOffset); 2250 assert(EndOffset > NewAllocaBeginOffset); 2251 NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset); 2252 NewEndOffset = std::min(EndOffset, NewAllocaEndOffset); 2253 2254 SliceSize = NewEndOffset - NewBeginOffset; 2255 2256 OldUse = I->getUse(); 2257 OldPtr = cast<Instruction>(OldUse->get()); 2258 2259 Instruction *OldUserI = cast<Instruction>(OldUse->getUser()); 2260 IRB.SetInsertPoint(OldUserI); 2261 IRB.SetCurrentDebugLocation(OldUserI->getDebugLoc()); 2262 IRB.SetNamePrefix(Twine(NewAI.getName()) + "." + Twine(BeginOffset) + "."); 2263 2264 CanSROA &= visit(cast<Instruction>(OldUse->getUser())); 2265 if (VecTy || IntTy) 2266 assert(CanSROA); 2267 return CanSROA; 2268 } 2269 2270 private: 2271 // Make sure the other visit overloads are visible. 2272 using Base::visit; 2273 2274 // Every instruction which can end up as a user must have a rewrite rule. 2275 bool visitInstruction(Instruction &I) { 2276 DEBUG(dbgs() << " !!!! Cannot rewrite: " << I << "\n"); 2277 llvm_unreachable("No rewrite rule for this instruction!"); 2278 } 2279 2280 Value *getNewAllocaSlicePtr(IRBuilderTy &IRB, Type *PointerTy) { 2281 // Note that the offset computation can use BeginOffset or NewBeginOffset 2282 // interchangeably for unsplit slices. 2283 assert(IsSplit || BeginOffset == NewBeginOffset); 2284 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; 2285 2286 #ifndef NDEBUG 2287 StringRef OldName = OldPtr->getName(); 2288 // Skip through the last '.sroa.' component of the name. 2289 size_t LastSROAPrefix = OldName.rfind(".sroa."); 2290 if (LastSROAPrefix != StringRef::npos) { 2291 OldName = OldName.substr(LastSROAPrefix + strlen(".sroa.")); 2292 // Look for an SROA slice index. 2293 size_t IndexEnd = OldName.find_first_not_of("0123456789"); 2294 if (IndexEnd != StringRef::npos && OldName[IndexEnd] == '.') { 2295 // Strip the index and look for the offset. 2296 OldName = OldName.substr(IndexEnd + 1); 2297 size_t OffsetEnd = OldName.find_first_not_of("0123456789"); 2298 if (OffsetEnd != StringRef::npos && OldName[OffsetEnd] == '.') 2299 // Strip the offset. 2300 OldName = OldName.substr(OffsetEnd + 1); 2301 } 2302 } 2303 // Strip any SROA suffixes as well. 2304 OldName = OldName.substr(0, OldName.find(".sroa_")); 2305 #endif 2306 2307 return getAdjustedPtr(IRB, DL, &NewAI, 2308 APInt(DL.getPointerTypeSizeInBits(PointerTy), Offset), 2309 PointerTy, 2310 #ifndef NDEBUG 2311 Twine(OldName) + "." 
2312 #else 2313 Twine() 2314 #endif 2315 ); 2316 } 2317 2318 /// \brief Compute suitable alignment to access this slice of the *new* 2319 /// alloca. 2320 /// 2321 /// You can optionally pass a type to this routine and if that type's ABI 2322 /// alignment is itself suitable, this will return zero. 2323 unsigned getSliceAlign(Type *Ty = nullptr) { 2324 unsigned NewAIAlign = NewAI.getAlignment(); 2325 if (!NewAIAlign) 2326 NewAIAlign = DL.getABITypeAlignment(NewAI.getAllocatedType()); 2327 unsigned Align = 2328 MinAlign(NewAIAlign, NewBeginOffset - NewAllocaBeginOffset); 2329 return (Ty && Align == DL.getABITypeAlignment(Ty)) ? 0 : Align; 2330 } 2331 2332 unsigned getIndex(uint64_t Offset) { 2333 assert(VecTy && "Can only call getIndex when rewriting a vector"); 2334 uint64_t RelOffset = Offset - NewAllocaBeginOffset; 2335 assert(RelOffset / ElementSize < UINT32_MAX && "Index out of bounds"); 2336 uint32_t Index = RelOffset / ElementSize; 2337 assert(Index * ElementSize == RelOffset); 2338 return Index; 2339 } 2340 2341 void deleteIfTriviallyDead(Value *V) { 2342 Instruction *I = cast<Instruction>(V); 2343 if (isInstructionTriviallyDead(I)) 2344 Pass.DeadInsts.insert(I); 2345 } 2346 2347 Value *rewriteVectorizedLoadInst() { 2348 unsigned BeginIndex = getIndex(NewBeginOffset); 2349 unsigned EndIndex = getIndex(NewEndOffset); 2350 assert(EndIndex > BeginIndex && "Empty vector!"); 2351 2352 Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load"); 2353 return extractVector(IRB, V, BeginIndex, EndIndex, "vec"); 2354 } 2355 2356 Value *rewriteIntegerLoad(LoadInst &LI) { 2357 assert(IntTy && "We cannot insert an integer to the alloca"); 2358 assert(!LI.isVolatile()); 2359 Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load"); 2360 V = convertValue(DL, IRB, V, IntTy); 2361 assert(NewBeginOffset >= NewAllocaBeginOffset && "Out of bounds offset"); 2362 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; 2363 if (Offset > 0 || NewEndOffset < NewAllocaEndOffset) { 2364 IntegerType *ExtractTy = Type::getIntNTy(LI.getContext(), SliceSize * 8); 2365 V = extractInteger(DL, IRB, V, ExtractTy, Offset, "extract"); 2366 } 2367 // It is possible that the extracted type is not the load type. This 2368 // happens if there is a load past the end of the alloca, and as 2369 // a consequence the slice is narrower but still a candidate for integer 2370 // lowering. To handle this case, we just zero extend the extracted 2371 // integer. 2372 assert(cast<IntegerType>(LI.getType())->getBitWidth() >= SliceSize * 8 && 2373 "Can only handle an extract for an overly wide load"); 2374 if (cast<IntegerType>(LI.getType())->getBitWidth() > SliceSize * 8) 2375 V = IRB.CreateZExt(V, LI.getType()); 2376 return V; 2377 } 2378 2379 bool visitLoadInst(LoadInst &LI) { 2380 DEBUG(dbgs() << " original: " << LI << "\n"); 2381 Value *OldOp = LI.getOperand(0); 2382 assert(OldOp == OldPtr); 2383 2384 unsigned AS = LI.getPointerAddressSpace(); 2385 2386 Type *TargetTy = IsSplit ? 
Type::getIntNTy(LI.getContext(), SliceSize * 8)
2387 : LI.getType();
2388 const bool IsLoadPastEnd = DL.getTypeStoreSize(TargetTy) > SliceSize;
2389 bool IsPtrAdjusted = false;
2390 Value *V;
2391 if (VecTy) {
2392 V = rewriteVectorizedLoadInst();
2393 } else if (IntTy && LI.getType()->isIntegerTy()) {
2394 V = rewriteIntegerLoad(LI);
2395 } else if (NewBeginOffset == NewAllocaBeginOffset &&
2396 NewEndOffset == NewAllocaEndOffset &&
2397 (canConvertValue(DL, NewAllocaTy, TargetTy) ||
2398 (IsLoadPastEnd && NewAllocaTy->isIntegerTy() &&
2399 TargetTy->isIntegerTy()))) {
2400 LoadInst *NewLI = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
2401 LI.isVolatile(), LI.getName());
2402 if (LI.isVolatile())
2403 NewLI->setAtomic(LI.getOrdering(), LI.getSynchScope());
2404
2405 // Any !nonnull metadata or !range metadata on the old load is also valid
2406 // on the new load. This is true in some cases even when the loads
2407 // are different types, for example by mapping !nonnull metadata to
2408 // !range metadata by modeling the null pointer constant converted to the
2409 // integer type.
2410 // FIXME: Add support for range metadata here. Currently the utilities
2411 // for this don't propagate range metadata in trivial cases from one
2412 // integer load to another, don't handle non-addrspace-0 null pointers
2413 // correctly, and don't have any support for mapping ranges as the
2414 // integer type becomes wider or narrower.
2415 if (MDNode *N = LI.getMetadata(LLVMContext::MD_nonnull))
2416 copyNonnullMetadata(LI, N, *NewLI);
2417
2418 // Try to preserve nonnull metadata
2419 V = NewLI;
2420
2421 // If this is an integer load past the end of the slice (which means the
2422 // bytes outside the slice are undef or this load is dead) just forcibly
2423 // fix the integer size with correct handling of endianness.
2424 if (auto *AITy = dyn_cast<IntegerType>(NewAllocaTy))
2425 if (auto *TITy = dyn_cast<IntegerType>(TargetTy))
2426 if (AITy->getBitWidth() < TITy->getBitWidth()) {
2427 V = IRB.CreateZExt(V, TITy, "load.ext");
2428 if (DL.isBigEndian())
2429 V = IRB.CreateShl(V, TITy->getBitWidth() - AITy->getBitWidth(),
2430 "endian_shift");
2431 }
2432 } else {
2433 Type *LTy = TargetTy->getPointerTo(AS);
2434 LoadInst *NewLI = IRB.CreateAlignedLoad(getNewAllocaSlicePtr(IRB, LTy),
2435 getSliceAlign(TargetTy),
2436 LI.isVolatile(), LI.getName());
2437 if (LI.isVolatile())
2438 NewLI->setAtomic(LI.getOrdering(), LI.getSynchScope());
2439
2440 V = NewLI;
2441 IsPtrAdjusted = true;
2442 }
2443 V = convertValue(DL, IRB, V, TargetTy);
2444
2445 if (IsSplit) {
2446 assert(!LI.isVolatile());
2447 assert(LI.getType()->isIntegerTy() &&
2448 "Only integer type loads and stores are split");
2449 assert(SliceSize < DL.getTypeStoreSize(LI.getType()) &&
2450 "Split load isn't smaller than original load");
2451 assert(LI.getType()->getIntegerBitWidth() ==
2452 DL.getTypeStoreSizeInBits(LI.getType()) &&
2453 "Non-byte-multiple bit width");
2454 // Move the insertion point just past the load so that we can refer to it.
2455 IRB.SetInsertPoint(&*std::next(BasicBlock::iterator(&LI)));
2456 // Create a placeholder value with the same type as LI to use as the
2457 // basis for the new value. This allows us to replace the uses of LI with
2458 // the computed value, and then replace the placeholder with LI, leaving
2459 // LI only used for this computation.
2460 Value *Placeholder = 2461 new LoadInst(UndefValue::get(LI.getType()->getPointerTo(AS))); 2462 V = insertInteger(DL, IRB, Placeholder, V, NewBeginOffset - BeginOffset, 2463 "insert"); 2464 LI.replaceAllUsesWith(V); 2465 Placeholder->replaceAllUsesWith(&LI); 2466 Placeholder->deleteValue(); 2467 } else { 2468 LI.replaceAllUsesWith(V); 2469 } 2470 2471 Pass.DeadInsts.insert(&LI); 2472 deleteIfTriviallyDead(OldOp); 2473 DEBUG(dbgs() << " to: " << *V << "\n"); 2474 return !LI.isVolatile() && !IsPtrAdjusted; 2475 } 2476 2477 bool rewriteVectorizedStoreInst(Value *V, StoreInst &SI, Value *OldOp) { 2478 if (V->getType() != VecTy) { 2479 unsigned BeginIndex = getIndex(NewBeginOffset); 2480 unsigned EndIndex = getIndex(NewEndOffset); 2481 assert(EndIndex > BeginIndex && "Empty vector!"); 2482 unsigned NumElements = EndIndex - BeginIndex; 2483 assert(NumElements <= VecTy->getNumElements() && "Too many elements!"); 2484 Type *SliceTy = (NumElements == 1) 2485 ? ElementTy 2486 : VectorType::get(ElementTy, NumElements); 2487 if (V->getType() != SliceTy) 2488 V = convertValue(DL, IRB, V, SliceTy); 2489 2490 // Mix in the existing elements. 2491 Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load"); 2492 V = insertVector(IRB, Old, V, BeginIndex, "vec"); 2493 } 2494 StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment()); 2495 Pass.DeadInsts.insert(&SI); 2496 2497 (void)Store; 2498 DEBUG(dbgs() << " to: " << *Store << "\n"); 2499 return true; 2500 } 2501 2502 bool rewriteIntegerStore(Value *V, StoreInst &SI) { 2503 assert(IntTy && "We cannot extract an integer from the alloca"); 2504 assert(!SI.isVolatile()); 2505 if (DL.getTypeSizeInBits(V->getType()) != IntTy->getBitWidth()) { 2506 Value *Old = 2507 IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload"); 2508 Old = convertValue(DL, IRB, Old, IntTy); 2509 assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset"); 2510 uint64_t Offset = BeginOffset - NewAllocaBeginOffset; 2511 V = insertInteger(DL, IRB, Old, SI.getValueOperand(), Offset, "insert"); 2512 } 2513 V = convertValue(DL, IRB, V, NewAllocaTy); 2514 StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment()); 2515 Store->copyMetadata(SI, LLVMContext::MD_mem_parallel_loop_access); 2516 Pass.DeadInsts.insert(&SI); 2517 DEBUG(dbgs() << " to: " << *Store << "\n"); 2518 return true; 2519 } 2520 2521 bool visitStoreInst(StoreInst &SI) { 2522 DEBUG(dbgs() << " original: " << SI << "\n"); 2523 Value *OldOp = SI.getOperand(1); 2524 assert(OldOp == OldPtr); 2525 2526 Value *V = SI.getValueOperand(); 2527 2528 // Strip all inbounds GEPs and pointer casts to try to dig out any root 2529 // alloca that should be re-examined after promoting this alloca. 
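// (E.g., hypothetically, if the stored value is a GEP into some other alloca
// %other, then once the current alloca is promoted and this store goes away,
// %other may itself become promotable, so it is queued on the post-promotion
// worklist below.)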
2530 if (V->getType()->isPointerTy()) 2531 if (AllocaInst *AI = dyn_cast<AllocaInst>(V->stripInBoundsOffsets())) 2532 Pass.PostPromotionWorklist.insert(AI); 2533 2534 if (SliceSize < DL.getTypeStoreSize(V->getType())) { 2535 assert(!SI.isVolatile()); 2536 assert(V->getType()->isIntegerTy() && 2537 "Only integer type loads and stores are split"); 2538 assert(V->getType()->getIntegerBitWidth() == 2539 DL.getTypeStoreSizeInBits(V->getType()) && 2540 "Non-byte-multiple bit width"); 2541 IntegerType *NarrowTy = Type::getIntNTy(SI.getContext(), SliceSize * 8); 2542 V = extractInteger(DL, IRB, V, NarrowTy, NewBeginOffset - BeginOffset, 2543 "extract"); 2544 } 2545 2546 if (VecTy) 2547 return rewriteVectorizedStoreInst(V, SI, OldOp); 2548 if (IntTy && V->getType()->isIntegerTy()) 2549 return rewriteIntegerStore(V, SI); 2550 2551 const bool IsStorePastEnd = DL.getTypeStoreSize(V->getType()) > SliceSize; 2552 StoreInst *NewSI; 2553 if (NewBeginOffset == NewAllocaBeginOffset && 2554 NewEndOffset == NewAllocaEndOffset && 2555 (canConvertValue(DL, V->getType(), NewAllocaTy) || 2556 (IsStorePastEnd && NewAllocaTy->isIntegerTy() && 2557 V->getType()->isIntegerTy()))) { 2558 // If this is an integer store past the end of slice (and thus the bytes 2559 // past that point are irrelevant or this is unreachable), truncate the 2560 // value prior to storing. 2561 if (auto *VITy = dyn_cast<IntegerType>(V->getType())) 2562 if (auto *AITy = dyn_cast<IntegerType>(NewAllocaTy)) 2563 if (VITy->getBitWidth() > AITy->getBitWidth()) { 2564 if (DL.isBigEndian()) 2565 V = IRB.CreateLShr(V, VITy->getBitWidth() - AITy->getBitWidth(), 2566 "endian_shift"); 2567 V = IRB.CreateTrunc(V, AITy, "load.trunc"); 2568 } 2569 2570 V = convertValue(DL, IRB, V, NewAllocaTy); 2571 NewSI = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(), 2572 SI.isVolatile()); 2573 } else { 2574 unsigned AS = SI.getPointerAddressSpace(); 2575 Value *NewPtr = getNewAllocaSlicePtr(IRB, V->getType()->getPointerTo(AS)); 2576 NewSI = IRB.CreateAlignedStore(V, NewPtr, getSliceAlign(V->getType()), 2577 SI.isVolatile()); 2578 } 2579 NewSI->copyMetadata(SI, LLVMContext::MD_mem_parallel_loop_access); 2580 if (SI.isVolatile()) 2581 NewSI->setAtomic(SI.getOrdering(), SI.getSynchScope()); 2582 Pass.DeadInsts.insert(&SI); 2583 deleteIfTriviallyDead(OldOp); 2584 2585 DEBUG(dbgs() << " to: " << *NewSI << "\n"); 2586 return NewSI->getPointerOperand() == &NewAI && !SI.isVolatile(); 2587 } 2588 2589 /// \brief Compute an integer value from splatting an i8 across the given 2590 /// number of bytes. 2591 /// 2592 /// Note that this routine assumes an i8 is a byte. If that isn't true, don't 2593 /// call this routine. 2594 /// FIXME: Heed the advice above. 2595 /// 2596 /// \param V The i8 value to splat. 
2597 /// \param Size The number of bytes in the output (assuming i8 is one byte) 2598 Value *getIntegerSplat(Value *V, unsigned Size) { 2599 assert(Size > 0 && "Expected a positive number of bytes."); 2600 IntegerType *VTy = cast<IntegerType>(V->getType()); 2601 assert(VTy->getBitWidth() == 8 && "Expected an i8 value for the byte"); 2602 if (Size == 1) 2603 return V; 2604 2605 Type *SplatIntTy = Type::getIntNTy(VTy->getContext(), Size * 8); 2606 V = IRB.CreateMul( 2607 IRB.CreateZExt(V, SplatIntTy, "zext"), 2608 ConstantExpr::getUDiv( 2609 Constant::getAllOnesValue(SplatIntTy), 2610 ConstantExpr::getZExt(Constant::getAllOnesValue(V->getType()), 2611 SplatIntTy)), 2612 "isplat"); 2613 return V; 2614 } 2615 2616 /// \brief Compute a vector splat for a given element value. 2617 Value *getVectorSplat(Value *V, unsigned NumElements) { 2618 V = IRB.CreateVectorSplat(NumElements, V, "vsplat"); 2619 DEBUG(dbgs() << " splat: " << *V << "\n"); 2620 return V; 2621 } 2622 2623 bool visitMemSetInst(MemSetInst &II) { 2624 DEBUG(dbgs() << " original: " << II << "\n"); 2625 assert(II.getRawDest() == OldPtr); 2626 2627 // If the memset has a variable size, it cannot be split, just adjust the 2628 // pointer to the new alloca. 2629 if (!isa<Constant>(II.getLength())) { 2630 assert(!IsSplit); 2631 assert(NewBeginOffset == BeginOffset); 2632 II.setDest(getNewAllocaSlicePtr(IRB, OldPtr->getType())); 2633 Type *CstTy = II.getAlignmentCst()->getType(); 2634 II.setAlignment(ConstantInt::get(CstTy, getSliceAlign())); 2635 2636 deleteIfTriviallyDead(OldPtr); 2637 return false; 2638 } 2639 2640 // Record this instruction for deletion. 2641 Pass.DeadInsts.insert(&II); 2642 2643 Type *AllocaTy = NewAI.getAllocatedType(); 2644 Type *ScalarTy = AllocaTy->getScalarType(); 2645 2646 // If this doesn't map cleanly onto the alloca type, and that type isn't 2647 // a single value type, just emit a memset. 2648 if (!VecTy && !IntTy && 2649 (BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset || 2650 SliceSize != DL.getTypeStoreSize(AllocaTy) || 2651 !AllocaTy->isSingleValueType() || 2652 !DL.isLegalInteger(DL.getTypeSizeInBits(ScalarTy)) || 2653 DL.getTypeSizeInBits(ScalarTy) % 8 != 0)) { 2654 Type *SizeTy = II.getLength()->getType(); 2655 Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset); 2656 CallInst *New = IRB.CreateMemSet( 2657 getNewAllocaSlicePtr(IRB, OldPtr->getType()), II.getValue(), Size, 2658 getSliceAlign(), II.isVolatile()); 2659 (void)New; 2660 DEBUG(dbgs() << " to: " << *New << "\n"); 2661 return false; 2662 } 2663 2664 // If we can represent this as a simple value, we have to build the actual 2665 // value to store, which requires expanding the byte present in memset to 2666 // a sensible representation for the alloca type. This is essentially 2667 // splatting the byte to a sufficiently wide integer, splatting it across 2668 // any desired vector width, and bitcasting to the final type. 2669 Value *V; 2670 2671 if (VecTy) { 2672 // If this is a memset of a vectorized alloca, insert it. 
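// (Hypothetical example: a memset of the byte 0xAB over bytes 4..12 of a
// <4 x i32> alloca becomes the i32 splat 0xABABABAB, widened to a <2 x i32>
// splat and inserted into elements 1 and 2 of the freshly loaded vector,
// followed by a single whole-vector store.)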
2673 assert(ElementTy == ScalarTy);
2674
2675 unsigned BeginIndex = getIndex(NewBeginOffset);
2676 unsigned EndIndex = getIndex(NewEndOffset);
2677 assert(EndIndex > BeginIndex && "Empty vector!");
2678 unsigned NumElements = EndIndex - BeginIndex;
2679 assert(NumElements <= VecTy->getNumElements() && "Too many elements!");
2680
2681 Value *Splat =
2682 getIntegerSplat(II.getValue(), DL.getTypeSizeInBits(ElementTy) / 8);
2683 Splat = convertValue(DL, IRB, Splat, ElementTy);
2684 if (NumElements > 1)
2685 Splat = getVectorSplat(Splat, NumElements);
2686
2687 Value *Old =
2688 IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload");
2689 V = insertVector(IRB, Old, Splat, BeginIndex, "vec");
2690 } else if (IntTy) {
2691 // If this is a memset on an alloca where we can widen stores, insert the
2692 // set integer.
2693 assert(!II.isVolatile());
2694
2695 uint64_t Size = NewEndOffset - NewBeginOffset;
2696 V = getIntegerSplat(II.getValue(), Size);
2697
2698 if (IntTy && (BeginOffset != NewAllocaBeginOffset ||
2699 EndOffset != NewAllocaEndOffset)) {
2700 Value *Old =
2701 IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload");
2702 Old = convertValue(DL, IRB, Old, IntTy);
2703 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
2704 V = insertInteger(DL, IRB, Old, V, Offset, "insert");
2705 } else {
2706 assert(V->getType() == IntTy &&
2707 "Wrong type for an alloca wide integer!");
2708 }
2709 V = convertValue(DL, IRB, V, AllocaTy);
2710 } else {
2711 // Established these invariants above.
2712 assert(NewBeginOffset == NewAllocaBeginOffset);
2713 assert(NewEndOffset == NewAllocaEndOffset);
2714
2715 V = getIntegerSplat(II.getValue(), DL.getTypeSizeInBits(ScalarTy) / 8);
2716 if (VectorType *AllocaVecTy = dyn_cast<VectorType>(AllocaTy))
2717 V = getVectorSplat(V, AllocaVecTy->getNumElements());
2718
2719 V = convertValue(DL, IRB, V, AllocaTy);
2720 }
2721
2722 Value *New = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(),
2723 II.isVolatile());
2724 (void)New;
2725 DEBUG(dbgs() << " to: " << *New << "\n");
2726 return !II.isVolatile();
2727 }
2728
2729 bool visitMemTransferInst(MemTransferInst &II) {
2730 // Rewriting of memory transfer instructions can be a bit tricky. We break
2731 // them into two categories: split intrinsics and unsplit intrinsics.
2732
2733 DEBUG(dbgs() << " original: " << II << "\n");
2734
2735 bool IsDest = &II.getRawDestUse() == OldUse;
2736 assert((IsDest && II.getRawDest() == OldPtr) ||
2737 (!IsDest && II.getRawSource() == OldPtr));
2738
2739 unsigned SliceAlign = getSliceAlign();
2740
2741 // For unsplit intrinsics, we simply modify the source and destination
2742 // pointers in place. This isn't just an optimization, it is a matter of
2743 // correctness. With unsplit intrinsics we may be dealing with transfers
2744 // within a single alloca before SROA ran, or with transfers that have
2745 // a variable length. We may also be dealing with memmove instead of
2746 // memcpy, and so simply updating the pointers is necessary for us to
2747 // update both source and dest of a single call.
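// (Consider, hypothetically, a memmove with a runtime length between two
// overlapping ranges of one alloca: the intrinsic itself must survive, and
// only its pointer operands can be retargeted at the new alloca slice.)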
2748 if (!IsSplittable) { 2749 Value *AdjustedPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); 2750 if (IsDest) 2751 II.setDest(AdjustedPtr); 2752 else 2753 II.setSource(AdjustedPtr); 2754 2755 if (II.getAlignment() > SliceAlign) { 2756 Type *CstTy = II.getAlignmentCst()->getType(); 2757 II.setAlignment( 2758 ConstantInt::get(CstTy, MinAlign(II.getAlignment(), SliceAlign))); 2759 } 2760 2761 DEBUG(dbgs() << " to: " << II << "\n"); 2762 deleteIfTriviallyDead(OldPtr); 2763 return false; 2764 } 2765 // For split transfer intrinsics we have an incredibly useful assurance: 2766 // the source and destination do not reside within the same alloca, and at 2767 // least one of them does not escape. This means that we can replace 2768 // memmove with memcpy, and we don't need to worry about all manner of 2769 // downsides to splitting and transforming the operations. 2770 2771 // If this doesn't map cleanly onto the alloca type, and that type isn't 2772 // a single value type, just emit a memcpy. 2773 bool EmitMemCpy = 2774 !VecTy && !IntTy && 2775 (BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset || 2776 SliceSize != DL.getTypeStoreSize(NewAI.getAllocatedType()) || 2777 !NewAI.getAllocatedType()->isSingleValueType()); 2778 2779 // If we're just going to emit a memcpy, the alloca hasn't changed, and the 2780 // size hasn't been shrunk based on analysis of the viable range, this is 2781 // a no-op. 2782 if (EmitMemCpy && &OldAI == &NewAI) { 2783 // Ensure the start lines up. 2784 assert(NewBeginOffset == BeginOffset); 2785 2786 // Rewrite the size as needed. 2787 if (NewEndOffset != EndOffset) 2788 II.setLength(ConstantInt::get(II.getLength()->getType(), 2789 NewEndOffset - NewBeginOffset)); 2790 return false; 2791 } 2792 // Record this instruction for deletion. 2793 Pass.DeadInsts.insert(&II); 2794 2795 // Strip all inbounds GEPs and pointer casts to try to dig out any root 2796 // alloca that should be re-examined after rewriting this instruction. 2797 Value *OtherPtr = IsDest ? II.getRawSource() : II.getRawDest(); 2798 if (AllocaInst *AI = 2799 dyn_cast<AllocaInst>(OtherPtr->stripInBoundsOffsets())) { 2800 assert(AI != &OldAI && AI != &NewAI && 2801 "Splittable transfers cannot reach the same alloca on both ends."); 2802 Pass.Worklist.insert(AI); 2803 } 2804 2805 Type *OtherPtrTy = OtherPtr->getType(); 2806 unsigned OtherAS = OtherPtrTy->getPointerAddressSpace(); 2807 2808 // Compute the relative offset for the other pointer within the transfer. 2809 unsigned IntPtrWidth = DL.getPointerSizeInBits(OtherAS); 2810 APInt OtherOffset(IntPtrWidth, NewBeginOffset - BeginOffset); 2811 unsigned OtherAlign = MinAlign(II.getAlignment() ? II.getAlignment() : 1, 2812 OtherOffset.zextOrTrunc(64).getZExtValue()); 2813 2814 if (EmitMemCpy) { 2815 // Compute the other pointer, folding as much as possible to produce 2816 // a single, simple GEP in most cases. 2817 OtherPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy, 2818 OtherPtr->getName() + "."); 2819 2820 Value *OurPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); 2821 Type *SizeTy = II.getLength()->getType(); 2822 Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset); 2823 2824 CallInst *New = IRB.CreateMemCpy( 2825 IsDest ? OurPtr : OtherPtr, IsDest ? 
OtherPtr : OurPtr, Size, 2826 MinAlign(SliceAlign, OtherAlign), II.isVolatile()); 2827 (void)New; 2828 DEBUG(dbgs() << " to: " << *New << "\n"); 2829 return false; 2830 } 2831 2832 bool IsWholeAlloca = NewBeginOffset == NewAllocaBeginOffset && 2833 NewEndOffset == NewAllocaEndOffset; 2834 uint64_t Size = NewEndOffset - NewBeginOffset; 2835 unsigned BeginIndex = VecTy ? getIndex(NewBeginOffset) : 0; 2836 unsigned EndIndex = VecTy ? getIndex(NewEndOffset) : 0; 2837 unsigned NumElements = EndIndex - BeginIndex; 2838 IntegerType *SubIntTy = 2839 IntTy ? Type::getIntNTy(IntTy->getContext(), Size * 8) : nullptr; 2840 2841 // Reset the other pointer type to match the register type we're going to 2842 // use, but using the address space of the original other pointer. 2843 if (VecTy && !IsWholeAlloca) { 2844 if (NumElements == 1) 2845 OtherPtrTy = VecTy->getElementType(); 2846 else 2847 OtherPtrTy = VectorType::get(VecTy->getElementType(), NumElements); 2848 2849 OtherPtrTy = OtherPtrTy->getPointerTo(OtherAS); 2850 } else if (IntTy && !IsWholeAlloca) { 2851 OtherPtrTy = SubIntTy->getPointerTo(OtherAS); 2852 } else { 2853 OtherPtrTy = NewAllocaTy->getPointerTo(OtherAS); 2854 } 2855 2856 Value *SrcPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy, 2857 OtherPtr->getName() + "."); 2858 unsigned SrcAlign = OtherAlign; 2859 Value *DstPtr = &NewAI; 2860 unsigned DstAlign = SliceAlign; 2861 if (!IsDest) { 2862 std::swap(SrcPtr, DstPtr); 2863 std::swap(SrcAlign, DstAlign); 2864 } 2865 2866 Value *Src; 2867 if (VecTy && !IsWholeAlloca && !IsDest) { 2868 Src = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load"); 2869 Src = extractVector(IRB, Src, BeginIndex, EndIndex, "vec"); 2870 } else if (IntTy && !IsWholeAlloca && !IsDest) { 2871 Src = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load"); 2872 Src = convertValue(DL, IRB, Src, IntTy); 2873 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; 2874 Src = extractInteger(DL, IRB, Src, SubIntTy, Offset, "extract"); 2875 } else { 2876 Src = 2877 IRB.CreateAlignedLoad(SrcPtr, SrcAlign, II.isVolatile(), "copyload"); 2878 } 2879 2880 if (VecTy && !IsWholeAlloca && IsDest) { 2881 Value *Old = 2882 IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload"); 2883 Src = insertVector(IRB, Old, Src, BeginIndex, "vec"); 2884 } else if (IntTy && !IsWholeAlloca && IsDest) { 2885 Value *Old = 2886 IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload"); 2887 Old = convertValue(DL, IRB, Old, IntTy); 2888 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; 2889 Src = insertInteger(DL, IRB, Old, Src, Offset, "insert"); 2890 Src = convertValue(DL, IRB, Src, NewAllocaTy); 2891 } 2892 2893 StoreInst *Store = cast<StoreInst>( 2894 IRB.CreateAlignedStore(Src, DstPtr, DstAlign, II.isVolatile())); 2895 (void)Store; 2896 DEBUG(dbgs() << " to: " << *Store << "\n"); 2897 return !II.isVolatile(); 2898 } 2899 2900 bool visitIntrinsicInst(IntrinsicInst &II) { 2901 assert(II.getIntrinsicID() == Intrinsic::lifetime_start || 2902 II.getIntrinsicID() == Intrinsic::lifetime_end); 2903 DEBUG(dbgs() << " original: " << II << "\n"); 2904 assert(II.getArgOperand(1) == OldPtr); 2905 2906 // Record this instruction for deletion. 2907 Pass.DeadInsts.insert(&II); 2908 2909 // Lifetime intrinsics are only promotable if they cover the whole alloca. 2910 // Therefore, we drop lifetime intrinsics which don't cover the whole 2911 // alloca. 
2912 // (In theory, intrinsics which partially cover an alloca could be 2913 // promoted, but PromoteMemToReg doesn't handle that case.) 2914 // FIXME: Check whether the alloca is promotable before dropping the 2915 // lifetime intrinsics? 2916 if (NewBeginOffset != NewAllocaBeginOffset || 2917 NewEndOffset != NewAllocaEndOffset) 2918 return true; 2919 2920 ConstantInt *Size = 2921 ConstantInt::get(cast<IntegerType>(II.getArgOperand(0)->getType()), 2922 NewEndOffset - NewBeginOffset); 2923 Value *Ptr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); 2924 Value *New; 2925 if (II.getIntrinsicID() == Intrinsic::lifetime_start) 2926 New = IRB.CreateLifetimeStart(Ptr, Size); 2927 else 2928 New = IRB.CreateLifetimeEnd(Ptr, Size); 2929 2930 (void)New; 2931 DEBUG(dbgs() << " to: " << *New << "\n"); 2932 2933 return true; 2934 } 2935 2936 bool visitPHINode(PHINode &PN) { 2937 DEBUG(dbgs() << " original: " << PN << "\n"); 2938 assert(BeginOffset >= NewAllocaBeginOffset && "PHIs are unsplittable"); 2939 assert(EndOffset <= NewAllocaEndOffset && "PHIs are unsplittable"); 2940 2941 // We would like to compute a new pointer in only one place, but have it be 2942 // as local as possible to the PHI. To do that, we re-use the location of 2943 // the old pointer, which necessarily must be in the right position to 2944 // dominate the PHI. 2945 IRBuilderTy PtrBuilder(IRB); 2946 if (isa<PHINode>(OldPtr)) 2947 PtrBuilder.SetInsertPoint(&*OldPtr->getParent()->getFirstInsertionPt()); 2948 else 2949 PtrBuilder.SetInsertPoint(OldPtr); 2950 PtrBuilder.SetCurrentDebugLocation(OldPtr->getDebugLoc()); 2951 2952 Value *NewPtr = getNewAllocaSlicePtr(PtrBuilder, OldPtr->getType()); 2953 // Replace the operands which were using the old pointer. 2954 std::replace(PN.op_begin(), PN.op_end(), cast<Value>(OldPtr), NewPtr); 2955 2956 DEBUG(dbgs() << " to: " << PN << "\n"); 2957 deleteIfTriviallyDead(OldPtr); 2958 2959 // PHIs can't be promoted on their own, but often can be speculated. We 2960 // check the speculation outside of the rewriter so that we see the 2961 // fully-rewritten alloca. 2962 PHIUsers.insert(&PN); 2963 return true; 2964 } 2965 2966 bool visitSelectInst(SelectInst &SI) { 2967 DEBUG(dbgs() << " original: " << SI << "\n"); 2968 assert((SI.getTrueValue() == OldPtr || SI.getFalseValue() == OldPtr) && 2969 "Pointer isn't an operand!"); 2970 assert(BeginOffset >= NewAllocaBeginOffset && "Selects are unsplittable"); 2971 assert(EndOffset <= NewAllocaEndOffset && "Selects are unsplittable"); 2972 2973 Value *NewPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); 2974 // Replace the operands which were using the old pointer. 2975 if (SI.getOperand(1) == OldPtr) 2976 SI.setOperand(1, NewPtr); 2977 if (SI.getOperand(2) == OldPtr) 2978 SI.setOperand(2, NewPtr); 2979 2980 DEBUG(dbgs() << " to: " << SI << "\n"); 2981 deleteIfTriviallyDead(OldPtr); 2982 2983 // Selects can't be promoted on their own, but often can be speculated. We 2984 // check the speculation outside of the rewriter so that we see the 2985 // fully-rewritten alloca. 2986 SelectUsers.insert(&SI); 2987 return true; 2988 } 2989 }; 2990 2991 namespace { 2992 /// \brief Visitor to rewrite aggregate loads and stores as scalar. 2993 /// 2994 /// This pass aggressively rewrites all aggregate loads and stores on 2995 /// a particular pointer (or any pointer derived from it which we can identify) 2996 /// with scalar loads and stores. 
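///
/// For illustration only (hypothetical IR, value names invented), a
/// first-class aggregate load such as:
///
///   %agg = load { i32, float }* %p
///
/// is rewritten by this visitor into per-element operations along the lines
/// of:
///
///   %gep.0 = getelementptr inbounds { i32, float }* %p, i32 0, i32 0
///   %ld.0 = load i32* %gep.0
///   %ins.0 = insertvalue { i32, float } undef, i32 %ld.0, 0
///   %gep.1 = getelementptr inbounds { i32, float }* %p, i32 0, i32 1
///   %ld.1 = load float* %gep.1
///   %agg.new = insertvalue { i32, float } %ins.0, float %ld.1, 1
///
/// Stores of aggregates are split symmetrically using extractvalue.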
2997 class AggLoadStoreRewriter : public InstVisitor<AggLoadStoreRewriter, bool> { 2998 // Befriend the base class so it can delegate to private visit methods. 2999 friend class llvm::InstVisitor<AggLoadStoreRewriter, bool>; 3000 3001 /// Queue of pointer uses to analyze and potentially rewrite. 3002 SmallVector<Use *, 8> Queue; 3003 3004 /// Set to prevent us from cycling with phi nodes and loops. 3005 SmallPtrSet<User *, 8> Visited; 3006 3007 /// The current pointer use being rewritten. This is used to dig up the used 3008 /// value (as opposed to the user). 3009 Use *U; 3010 3011 public: 3012 /// Rewrite loads and stores through a pointer and all pointers derived from 3013 /// it. 3014 bool rewrite(Instruction &I) { 3015 DEBUG(dbgs() << " Rewriting FCA loads and stores...\n"); 3016 enqueueUsers(I); 3017 bool Changed = false; 3018 while (!Queue.empty()) { 3019 U = Queue.pop_back_val(); 3020 Changed |= visit(cast<Instruction>(U->getUser())); 3021 } 3022 return Changed; 3023 } 3024 3025 private: 3026 /// Enqueue all the users of the given instruction for further processing. 3027 /// This uses a set to de-duplicate users. 3028 void enqueueUsers(Instruction &I) { 3029 for (Use &U : I.uses()) 3030 if (Visited.insert(U.getUser()).second) 3031 Queue.push_back(&U); 3032 } 3033 3034 // Conservative default is to not rewrite anything. 3035 bool visitInstruction(Instruction &I) { return false; } 3036 3037 /// \brief Generic recursive split emission class. 3038 template <typename Derived> class OpSplitter { 3039 protected: 3040 /// The builder used to form new instructions. 3041 IRBuilderTy IRB; 3042 /// The indices which to be used with insert- or extractvalue to select the 3043 /// appropriate value within the aggregate. 3044 SmallVector<unsigned, 4> Indices; 3045 /// The indices to a GEP instruction which will move Ptr to the correct slot 3046 /// within the aggregate. 3047 SmallVector<Value *, 4> GEPIndices; 3048 /// The base pointer of the original op, used as a base for GEPing the 3049 /// split operations. 3050 Value *Ptr; 3051 3052 /// Initialize the splitter with an insertion point, Ptr and start with a 3053 /// single zero GEP index. 3054 OpSplitter(Instruction *InsertionPoint, Value *Ptr) 3055 : IRB(InsertionPoint), GEPIndices(1, IRB.getInt32(0)), Ptr(Ptr) {} 3056 3057 public: 3058 /// \brief Generic recursive split emission routine. 3059 /// 3060 /// This method recursively splits an aggregate op (load or store) into 3061 /// scalar or vector ops. It splits recursively until it hits a single value 3062 /// and emits that single value operation via the template argument. 3063 /// 3064 /// The logic of this routine relies on GEPs and insertvalue and 3065 /// extractvalue all operating with the same fundamental index list, merely 3066 /// formatted differently (GEPs need actual values). 3067 /// 3068 /// \param Ty The type being split recursively into smaller ops. 3069 /// \param Agg The aggregate value being built up or stored, depending on 3070 /// whether this is splitting a load or a store respectively. 
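    ///
    /// As a sketch of the index bookkeeping (illustrative values only): for a
    /// type like { [2 x i32], float }, visiting the second element of the
    /// inner array leaves Indices = {0, 1} and GEPIndices = {i32 0, i32 0,
    /// i32 1}; the extra leading zero in GEPIndices is the pointer-level
    /// index seeded by the constructor, while Indices maps directly onto the
    /// insertvalue/extractvalue operands.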
3071 void emitSplitOps(Type *Ty, Value *&Agg, const Twine &Name) { 3072 if (Ty->isSingleValueType()) 3073 return static_cast<Derived *>(this)->emitFunc(Ty, Agg, Name); 3074 3075 if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) { 3076 unsigned OldSize = Indices.size(); 3077 (void)OldSize; 3078 for (unsigned Idx = 0, Size = ATy->getNumElements(); Idx != Size; 3079 ++Idx) { 3080 assert(Indices.size() == OldSize && "Did not return to the old size"); 3081 Indices.push_back(Idx); 3082 GEPIndices.push_back(IRB.getInt32(Idx)); 3083 emitSplitOps(ATy->getElementType(), Agg, Name + "." + Twine(Idx)); 3084 GEPIndices.pop_back(); 3085 Indices.pop_back(); 3086 } 3087 return; 3088 } 3089 3090 if (StructType *STy = dyn_cast<StructType>(Ty)) { 3091 unsigned OldSize = Indices.size(); 3092 (void)OldSize; 3093 for (unsigned Idx = 0, Size = STy->getNumElements(); Idx != Size; 3094 ++Idx) { 3095 assert(Indices.size() == OldSize && "Did not return to the old size"); 3096 Indices.push_back(Idx); 3097 GEPIndices.push_back(IRB.getInt32(Idx)); 3098 emitSplitOps(STy->getElementType(Idx), Agg, Name + "." + Twine(Idx)); 3099 GEPIndices.pop_back(); 3100 Indices.pop_back(); 3101 } 3102 return; 3103 } 3104 3105 llvm_unreachable("Only arrays and structs are aggregate loadable types"); 3106 } 3107 }; 3108 3109 struct LoadOpSplitter : public OpSplitter<LoadOpSplitter> { 3110 LoadOpSplitter(Instruction *InsertionPoint, Value *Ptr) 3111 : OpSplitter<LoadOpSplitter>(InsertionPoint, Ptr) {} 3112 3113 /// Emit a leaf load of a single value. This is called at the leaves of the 3114 /// recursive emission to actually load values. 3115 void emitFunc(Type *Ty, Value *&Agg, const Twine &Name) { 3116 assert(Ty->isSingleValueType()); 3117 // Load the single value and insert it using the indices. 3118 Value *GEP = 3119 IRB.CreateInBoundsGEP(nullptr, Ptr, GEPIndices, Name + ".gep"); 3120 Value *Load = IRB.CreateLoad(GEP, Name + ".load"); 3121 Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert"); 3122 DEBUG(dbgs() << " to: " << *Load << "\n"); 3123 } 3124 }; 3125 3126 bool visitLoadInst(LoadInst &LI) { 3127 assert(LI.getPointerOperand() == *U); 3128 if (!LI.isSimple() || LI.getType()->isSingleValueType()) 3129 return false; 3130 3131 // We have an aggregate being loaded, split it apart. 3132 DEBUG(dbgs() << " original: " << LI << "\n"); 3133 LoadOpSplitter Splitter(&LI, *U); 3134 Value *V = UndefValue::get(LI.getType()); 3135 Splitter.emitSplitOps(LI.getType(), V, LI.getName() + ".fca"); 3136 LI.replaceAllUsesWith(V); 3137 LI.eraseFromParent(); 3138 return true; 3139 } 3140 3141 struct StoreOpSplitter : public OpSplitter<StoreOpSplitter> { 3142 StoreOpSplitter(Instruction *InsertionPoint, Value *Ptr) 3143 : OpSplitter<StoreOpSplitter>(InsertionPoint, Ptr) {} 3144 3145 /// Emit a leaf store of a single value. This is called at the leaves of the 3146 /// recursive emission to actually produce stores. 3147 void emitFunc(Type *Ty, Value *&Agg, const Twine &Name) { 3148 assert(Ty->isSingleValueType()); 3149 // Extract the single value and store it using the indices. 3150 // 3151 // The gep and extractvalue values are factored out of the CreateStore 3152 // call to make the output independent of the argument evaluation order. 
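      // Illustrative shape of the emitted leaf IR (hypothetical names), for
      // the element at index 1 of a { i32, float } value:
      //
      //   %v.fca.1.extract = extractvalue { i32, float } %v, 1
      //   %v.fca.1.gep = getelementptr inbounds { i32, float }* %p, i32 0, i32 1
      //   store float %v.fca.1.extract, float* %v.fca.1.gep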
3153 Value *ExtractValue = 3154 IRB.CreateExtractValue(Agg, Indices, Name + ".extract"); 3155 Value *InBoundsGEP = 3156 IRB.CreateInBoundsGEP(nullptr, Ptr, GEPIndices, Name + ".gep"); 3157 Value *Store = IRB.CreateStore(ExtractValue, InBoundsGEP); 3158 (void)Store; 3159 DEBUG(dbgs() << " to: " << *Store << "\n"); 3160 } 3161 }; 3162 3163 bool visitStoreInst(StoreInst &SI) { 3164 if (!SI.isSimple() || SI.getPointerOperand() != *U) 3165 return false; 3166 Value *V = SI.getValueOperand(); 3167 if (V->getType()->isSingleValueType()) 3168 return false; 3169 3170 // We have an aggregate being stored, split it apart. 3171 DEBUG(dbgs() << " original: " << SI << "\n"); 3172 StoreOpSplitter Splitter(&SI, *U); 3173 Splitter.emitSplitOps(V->getType(), V, V->getName() + ".fca"); 3174 SI.eraseFromParent(); 3175 return true; 3176 } 3177 3178 bool visitBitCastInst(BitCastInst &BC) { 3179 enqueueUsers(BC); 3180 return false; 3181 } 3182 3183 bool visitGetElementPtrInst(GetElementPtrInst &GEPI) { 3184 enqueueUsers(GEPI); 3185 return false; 3186 } 3187 3188 bool visitPHINode(PHINode &PN) { 3189 enqueueUsers(PN); 3190 return false; 3191 } 3192 3193 bool visitSelectInst(SelectInst &SI) { 3194 enqueueUsers(SI); 3195 return false; 3196 } 3197 }; 3198 } 3199 3200 /// \brief Strip aggregate type wrapping. 3201 /// 3202 /// This removes no-op aggregate types wrapping an underlying type. It will 3203 /// strip as many layers of types as it can without changing either the type 3204 /// size or the allocated size. 3205 static Type *stripAggregateTypeWrapping(const DataLayout &DL, Type *Ty) { 3206 if (Ty->isSingleValueType()) 3207 return Ty; 3208 3209 uint64_t AllocSize = DL.getTypeAllocSize(Ty); 3210 uint64_t TypeSize = DL.getTypeSizeInBits(Ty); 3211 3212 Type *InnerTy; 3213 if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) { 3214 InnerTy = ArrTy->getElementType(); 3215 } else if (StructType *STy = dyn_cast<StructType>(Ty)) { 3216 const StructLayout *SL = DL.getStructLayout(STy); 3217 unsigned Index = SL->getElementContainingOffset(0); 3218 InnerTy = STy->getElementType(Index); 3219 } else { 3220 return Ty; 3221 } 3222 3223 if (AllocSize > DL.getTypeAllocSize(InnerTy) || 3224 TypeSize > DL.getTypeSizeInBits(InnerTy)) 3225 return Ty; 3226 3227 return stripAggregateTypeWrapping(DL, InnerTy); 3228 } 3229 3230 /// \brief Try to find a partition of the aggregate type passed in for a given 3231 /// offset and size. 3232 /// 3233 /// This recurses through the aggregate type and tries to compute a subtype 3234 /// based on the offset and size. When the offset and size span a sub-section 3235 /// of an array, it will even compute a new array type for that sub-section, 3236 /// and the same for structs. 3237 /// 3238 /// Note that this routine is very strict and tries to find a partition of the 3239 /// type which produces the *exact* right offset and size. It is not forgiving 3240 /// when the size or offset cause either end of type-based partition to be off. 3241 /// Also, this is a best-effort routine. It is reasonable to give up and not 3242 /// return a type if necessary. 
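///
/// A minimal illustration (assuming i32 occupies 4 bytes in the target data
/// layout): asked for Offset = 4 and Size = 8 within [4 x i32], this routine
/// skips one element and returns [2 x i32]; asked for Offset = 2 and Size = 4
/// in the same type, it returns null because the range straddles an element
/// boundary.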
3243 static Type *getTypePartition(const DataLayout &DL, Type *Ty, uint64_t Offset, 3244 uint64_t Size) { 3245 if (Offset == 0 && DL.getTypeAllocSize(Ty) == Size) 3246 return stripAggregateTypeWrapping(DL, Ty); 3247 if (Offset > DL.getTypeAllocSize(Ty) || 3248 (DL.getTypeAllocSize(Ty) - Offset) < Size) 3249 return nullptr; 3250 3251 if (SequentialType *SeqTy = dyn_cast<SequentialType>(Ty)) { 3252 Type *ElementTy = SeqTy->getElementType(); 3253 uint64_t ElementSize = DL.getTypeAllocSize(ElementTy); 3254 uint64_t NumSkippedElements = Offset / ElementSize; 3255 if (NumSkippedElements >= SeqTy->getNumElements()) 3256 return nullptr; 3257 Offset -= NumSkippedElements * ElementSize; 3258 3259 // First check if we need to recurse. 3260 if (Offset > 0 || Size < ElementSize) { 3261 // Bail if the partition ends in a different array element. 3262 if ((Offset + Size) > ElementSize) 3263 return nullptr; 3264 // Recurse through the element type trying to peel off offset bytes. 3265 return getTypePartition(DL, ElementTy, Offset, Size); 3266 } 3267 assert(Offset == 0); 3268 3269 if (Size == ElementSize) 3270 return stripAggregateTypeWrapping(DL, ElementTy); 3271 assert(Size > ElementSize); 3272 uint64_t NumElements = Size / ElementSize; 3273 if (NumElements * ElementSize != Size) 3274 return nullptr; 3275 return ArrayType::get(ElementTy, NumElements); 3276 } 3277 3278 StructType *STy = dyn_cast<StructType>(Ty); 3279 if (!STy) 3280 return nullptr; 3281 3282 const StructLayout *SL = DL.getStructLayout(STy); 3283 if (Offset >= SL->getSizeInBytes()) 3284 return nullptr; 3285 uint64_t EndOffset = Offset + Size; 3286 if (EndOffset > SL->getSizeInBytes()) 3287 return nullptr; 3288 3289 unsigned Index = SL->getElementContainingOffset(Offset); 3290 Offset -= SL->getElementOffset(Index); 3291 3292 Type *ElementTy = STy->getElementType(Index); 3293 uint64_t ElementSize = DL.getTypeAllocSize(ElementTy); 3294 if (Offset >= ElementSize) 3295 return nullptr; // The offset points into alignment padding. 3296 3297 // See if any partition must be contained by the element. 3298 if (Offset > 0 || Size < ElementSize) { 3299 if ((Offset + Size) > ElementSize) 3300 return nullptr; 3301 return getTypePartition(DL, ElementTy, Offset, Size); 3302 } 3303 assert(Offset == 0); 3304 3305 if (Size == ElementSize) 3306 return stripAggregateTypeWrapping(DL, ElementTy); 3307 3308 StructType::element_iterator EI = STy->element_begin() + Index, 3309 EE = STy->element_end(); 3310 if (EndOffset < SL->getSizeInBytes()) { 3311 unsigned EndIndex = SL->getElementContainingOffset(EndOffset); 3312 if (Index == EndIndex) 3313 return nullptr; // Within a single element and its padding. 3314 3315 // Don't try to form "natural" types if the elements don't line up with the 3316 // expected size. 3317 // FIXME: We could potentially recurse down through the last element in the 3318 // sub-struct to find a natural end point. 3319 if (SL->getElementOffset(EndIndex) != EndOffset) 3320 return nullptr; 3321 3322 assert(Index < EndIndex); 3323 EE = STy->element_begin() + EndIndex; 3324 } 3325 3326 // Try to build up a sub-structure. 3327 StructType *SubTy = 3328 StructType::get(STy->getContext(), makeArrayRef(EI, EE), STy->isPacked()); 3329 const StructLayout *SubSL = DL.getStructLayout(SubTy); 3330 if (Size != SubSL->getSizeInBytes()) 3331 return nullptr; // The sub-struct doesn't have quite the size needed. 3332 3333 return SubTy; 3334 } 3335 3336 /// \brief Pre-split loads and stores to simplify rewriting. 
3337 /// 3338 /// We want to break up the splittable load+store pairs as much as 3339 /// possible. This is important to do as a preprocessing step, as once we 3340 /// start rewriting the accesses to partitions of the alloca we lose the 3341 /// necessary information to correctly split apart paired loads and stores 3342 /// which both point into this alloca. The case to consider is something like 3343 /// the following: 3344 /// 3345 /// %a = alloca [12 x i8] 3346 /// %gep1 = getelementptr [12 x i8]* %a, i32 0, i32 0 3347 /// %gep2 = getelementptr [12 x i8]* %a, i32 0, i32 4 3348 /// %gep3 = getelementptr [12 x i8]* %a, i32 0, i32 8 3349 /// %iptr1 = bitcast i8* %gep1 to i64* 3350 /// %iptr2 = bitcast i8* %gep2 to i64* 3351 /// %fptr1 = bitcast i8* %gep1 to float* 3352 /// %fptr2 = bitcast i8* %gep2 to float* 3353 /// %fptr3 = bitcast i8* %gep3 to float* 3354 /// store float 0.0, float* %fptr1 3355 /// store float 1.0, float* %fptr2 3356 /// %v = load i64* %iptr1 3357 /// store i64 %v, i64* %iptr2 3358 /// %f1 = load float* %fptr2 3359 /// %f2 = load float* %fptr3 3360 /// 3361 /// Here we want to form 3 partitions of the alloca, each 4 bytes large, and 3362 /// promote everything so we recover the 2 SSA values that should have been 3363 /// there all along. 3364 /// 3365 /// \returns true if any changes are made. 3366 bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) { 3367 DEBUG(dbgs() << "Pre-splitting loads and stores\n"); 3368 3369 // Track the loads and stores which are candidates for pre-splitting here, in 3370 // the order they first appear during the partition scan. These give stable 3371 // iteration order and a basis for tracking which loads and stores we 3372 // actually split. 3373 SmallVector<LoadInst *, 4> Loads; 3374 SmallVector<StoreInst *, 4> Stores; 3375 3376 // We need to accumulate the splits required of each load or store where we 3377 // can find them via a direct lookup. This is important to cross-check loads 3378 // and stores against each other. We also track the slice so that we can kill 3379 // all the slices that end up split. 3380 struct SplitOffsets { 3381 Slice *S; 3382 std::vector<uint64_t> Splits; 3383 }; 3384 SmallDenseMap<Instruction *, SplitOffsets, 8> SplitOffsetsMap; 3385 3386 // Track loads out of this alloca which cannot, for any reason, be pre-split. 3387 // This is important as we also cannot pre-split stores of those loads! 3388 // FIXME: This is all pretty gross. It means that we can be more aggressive 3389 // in pre-splitting when the load feeding the store happens to come from 3390 // a separate alloca. Put another way, the effectiveness of SROA would be 3391 // decreased by a frontend which just concatenated all of its local allocas 3392 // into one big flat alloca. But defeating such patterns is exactly the job 3393 // SROA is tasked with! Sadly, to not have this discrepancy we would have 3394 // change store pre-splitting to actually force pre-splitting of the load 3395 // that feeds it *and all stores*. That makes pre-splitting much harder, but 3396 // maybe it would make it more principled? 3397 SmallPtrSet<LoadInst *, 8> UnsplittableLoads; 3398 3399 DEBUG(dbgs() << " Searching for candidate loads and stores\n"); 3400 for (auto &P : AS.partitions()) { 3401 for (Slice &S : P) { 3402 Instruction *I = cast<Instruction>(S.getUse()->getUser()); 3403 if (!S.isSplittable() || S.endOffset() <= P.endOffset()) { 3404 // If this is a load we have to track that it can't participate in any 3405 // pre-splitting. 
If this is a store of a load we have to track that 3406 // that load also can't participate in any pre-splitting. 3407 if (auto *LI = dyn_cast<LoadInst>(I)) 3408 UnsplittableLoads.insert(LI); 3409 else if (auto *SI = dyn_cast<StoreInst>(I)) 3410 if (auto *LI = dyn_cast<LoadInst>(SI->getValueOperand())) 3411 UnsplittableLoads.insert(LI); 3412 continue; 3413 } 3414 assert(P.endOffset() > S.beginOffset() && 3415 "Empty or backwards partition!"); 3416 3417 // Determine if this is a pre-splittable slice. 3418 if (auto *LI = dyn_cast<LoadInst>(I)) { 3419 assert(!LI->isVolatile() && "Cannot split volatile loads!"); 3420 3421 // The load must be used exclusively to store into other pointers for 3422 // us to be able to arbitrarily pre-split it. The stores must also be 3423 // simple to avoid changing semantics. 3424 auto IsLoadSimplyStored = [](LoadInst *LI) { 3425 for (User *LU : LI->users()) { 3426 auto *SI = dyn_cast<StoreInst>(LU); 3427 if (!SI || !SI->isSimple()) 3428 return false; 3429 } 3430 return true; 3431 }; 3432 if (!IsLoadSimplyStored(LI)) { 3433 UnsplittableLoads.insert(LI); 3434 continue; 3435 } 3436 3437 Loads.push_back(LI); 3438 } else if (auto *SI = dyn_cast<StoreInst>(I)) { 3439 if (S.getUse() != &SI->getOperandUse(SI->getPointerOperandIndex())) 3440 // Skip stores *of* pointers. FIXME: This shouldn't even be possible! 3441 continue; 3442 auto *StoredLoad = dyn_cast<LoadInst>(SI->getValueOperand()); 3443 if (!StoredLoad || !StoredLoad->isSimple()) 3444 continue; 3445 assert(!SI->isVolatile() && "Cannot split volatile stores!"); 3446 3447 Stores.push_back(SI); 3448 } else { 3449 // Other uses cannot be pre-split. 3450 continue; 3451 } 3452 3453 // Record the initial split. 3454 DEBUG(dbgs() << " Candidate: " << *I << "\n"); 3455 auto &Offsets = SplitOffsetsMap[I]; 3456 assert(Offsets.Splits.empty() && 3457 "Should not have splits the first time we see an instruction!"); 3458 Offsets.S = &S; 3459 Offsets.Splits.push_back(P.endOffset() - S.beginOffset()); 3460 } 3461 3462 // Now scan the already split slices, and add a split for any of them which 3463 // we're going to pre-split. 3464 for (Slice *S : P.splitSliceTails()) { 3465 auto SplitOffsetsMapI = 3466 SplitOffsetsMap.find(cast<Instruction>(S->getUse()->getUser())); 3467 if (SplitOffsetsMapI == SplitOffsetsMap.end()) 3468 continue; 3469 auto &Offsets = SplitOffsetsMapI->second; 3470 3471 assert(Offsets.S == S && "Found a mismatched slice!"); 3472 assert(!Offsets.Splits.empty() && 3473 "Cannot have an empty set of splits on the second partition!"); 3474 assert(Offsets.Splits.back() == 3475 P.beginOffset() - Offsets.S->beginOffset() && 3476 "Previous split does not end where this one begins!"); 3477 3478 // Record each split. The last partition's end isn't needed as the size 3479 // of the slice dictates that. 3480 if (S->endOffset() > P.endOffset()) 3481 Offsets.Splits.push_back(P.endOffset() - Offsets.S->beginOffset()); 3482 } 3483 } 3484 3485 // We may have split loads where some of their stores are split stores. For 3486 // such loads and stores, we can only pre-split them if their splits exactly 3487 // match relative to their starting offset. We have to verify this prior to 3488 // any rewriting. 3489 Stores.erase( 3490 remove_if(Stores, 3491 [&UnsplittableLoads, &SplitOffsetsMap](StoreInst *SI) { 3492 // Lookup the load we are storing in our map of split 3493 // offsets. 
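            // For example (hypothetical offsets): a load split at relative
            // offsets {4, 8} matches a store split at {4, 8} and both are
            // kept; if the store's splits were {8} instead, the pair is
            // abandoned and the load is marked unsplittable below.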
            auto *LI = cast<LoadInst>(SI->getValueOperand());
            // If it was completely unsplittable, then we're done,
            // and this store can't be pre-split.
            if (UnsplittableLoads.count(LI))
              return true;

            auto LoadOffsetsI = SplitOffsetsMap.find(LI);
            if (LoadOffsetsI == SplitOffsetsMap.end())
              return false; // Unrelated loads are definitely safe.
            auto &LoadOffsets = LoadOffsetsI->second;

            // Now lookup the store's offsets.
            auto &StoreOffsets = SplitOffsetsMap[SI];

            // If the relative offsets of each split in the load and
            // store match exactly, then we can split them and we
            // don't need to remove them here.
            if (LoadOffsets.Splits == StoreOffsets.Splits)
              return false;

            DEBUG(dbgs() << "    Mismatched splits for load and store:\n"
                         << "      " << *LI << "\n"
                         << "      " << *SI << "\n");

            // We've found a store and load that we need to split
            // with mismatched relative splits. Just give up on them
            // and remove both instructions from our list of
            // candidates.
            UnsplittableLoads.insert(LI);
            return true;
          }),
      Stores.end());
  // Now we have to go *back* through all the stores, because a later store may
  // have caused an earlier store's load to become unsplittable and if it is
  // unsplittable for the later store, then we can't rely on it being split in
  // the earlier store either.
  Stores.erase(remove_if(Stores,
                         [&UnsplittableLoads](StoreInst *SI) {
                           auto *LI = cast<LoadInst>(SI->getValueOperand());
                           return UnsplittableLoads.count(LI);
                         }),
               Stores.end());
  // Once we've established all the loads that can't be split for some reason,
  // filter out any that made it into our candidate list.
  Loads.erase(remove_if(Loads,
                        [&UnsplittableLoads](LoadInst *LI) {
                          return UnsplittableLoads.count(LI);
                        }),
              Loads.end());

  // If no loads or stores are left, there is no pre-splitting to be done for
  // this alloca.
  if (Loads.empty() && Stores.empty())
    return false;

  // From here on, we can't fail and will be building new accesses, so rig up
  // an IR builder.
  IRBuilderTy IRB(&AI);

  // Collect the new slices which we will merge into the alloca slices.
  SmallVector<Slice, 4> NewSlices;

  // Track any allocas we end up splitting loads and stores for so we iterate
  // on them.
  SmallPtrSet<AllocaInst *, 4> ResplitPromotableAllocas;

  // At this point, we have collected all of the loads and stores we can
  // pre-split, and the specific splits needed for them. We actually do the
  // splitting in a specific order in order to handle when one of the loads is
  // the value operand to one of the stores.
  //
  // First, we rewrite all of the split loads, and just accumulate each split
  // load in a parallel structure. We also build the slices for them and append
  // them to the alloca slices.
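  //
  // As a concrete (hypothetical) illustration: an i64 load whose split list
  // is {4} becomes two i32 loads, one at relative offset 0 and one at
  // relative offset 4, and each new load is recorded as a fresh unsplittable
  // slice of the alloca.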
3568 SmallDenseMap<LoadInst *, std::vector<LoadInst *>, 1> SplitLoadsMap; 3569 std::vector<LoadInst *> SplitLoads; 3570 const DataLayout &DL = AI.getModule()->getDataLayout(); 3571 for (LoadInst *LI : Loads) { 3572 SplitLoads.clear(); 3573 3574 IntegerType *Ty = cast<IntegerType>(LI->getType()); 3575 uint64_t LoadSize = Ty->getBitWidth() / 8; 3576 assert(LoadSize > 0 && "Cannot have a zero-sized integer load!"); 3577 3578 auto &Offsets = SplitOffsetsMap[LI]; 3579 assert(LoadSize == Offsets.S->endOffset() - Offsets.S->beginOffset() && 3580 "Slice size should always match load size exactly!"); 3581 uint64_t BaseOffset = Offsets.S->beginOffset(); 3582 assert(BaseOffset + LoadSize > BaseOffset && 3583 "Cannot represent alloca access size using 64-bit integers!"); 3584 3585 Instruction *BasePtr = cast<Instruction>(LI->getPointerOperand()); 3586 IRB.SetInsertPoint(LI); 3587 3588 DEBUG(dbgs() << " Splitting load: " << *LI << "\n"); 3589 3590 uint64_t PartOffset = 0, PartSize = Offsets.Splits.front(); 3591 int Idx = 0, Size = Offsets.Splits.size(); 3592 for (;;) { 3593 auto *PartTy = Type::getIntNTy(Ty->getContext(), PartSize * 8); 3594 auto AS = LI->getPointerAddressSpace(); 3595 auto *PartPtrTy = PartTy->getPointerTo(AS); 3596 LoadInst *PLoad = IRB.CreateAlignedLoad( 3597 getAdjustedPtr(IRB, DL, BasePtr, 3598 APInt(DL.getPointerSizeInBits(AS), PartOffset), 3599 PartPtrTy, BasePtr->getName() + "."), 3600 getAdjustedAlignment(LI, PartOffset, DL), /*IsVolatile*/ false, 3601 LI->getName()); 3602 PLoad->copyMetadata(*LI, LLVMContext::MD_mem_parallel_loop_access); 3603 3604 // Append this load onto the list of split loads so we can find it later 3605 // to rewrite the stores. 3606 SplitLoads.push_back(PLoad); 3607 3608 // Now build a new slice for the alloca. 3609 NewSlices.push_back( 3610 Slice(BaseOffset + PartOffset, BaseOffset + PartOffset + PartSize, 3611 &PLoad->getOperandUse(PLoad->getPointerOperandIndex()), 3612 /*IsSplittable*/ false)); 3613 DEBUG(dbgs() << " new slice [" << NewSlices.back().beginOffset() 3614 << ", " << NewSlices.back().endOffset() << "): " << *PLoad 3615 << "\n"); 3616 3617 // See if we've handled all the splits. 3618 if (Idx >= Size) 3619 break; 3620 3621 // Setup the next partition. 3622 PartOffset = Offsets.Splits[Idx]; 3623 ++Idx; 3624 PartSize = (Idx < Size ? Offsets.Splits[Idx] : LoadSize) - PartOffset; 3625 } 3626 3627 // Now that we have the split loads, do the slow walk over all uses of the 3628 // load and rewrite them as split stores, or save the split loads to use 3629 // below if the store is going to be split there anyways. 3630 bool DeferredStores = false; 3631 for (User *LU : LI->users()) { 3632 StoreInst *SI = cast<StoreInst>(LU); 3633 if (!Stores.empty() && SplitOffsetsMap.count(SI)) { 3634 DeferredStores = true; 3635 DEBUG(dbgs() << " Deferred splitting of store: " << *SI << "\n"); 3636 continue; 3637 } 3638 3639 Value *StoreBasePtr = SI->getPointerOperand(); 3640 IRB.SetInsertPoint(SI); 3641 3642 DEBUG(dbgs() << " Splitting store of load: " << *SI << "\n"); 3643 3644 for (int Idx = 0, Size = SplitLoads.size(); Idx < Size; ++Idx) { 3645 LoadInst *PLoad = SplitLoads[Idx]; 3646 uint64_t PartOffset = Idx == 0 ? 
0 : Offsets.Splits[Idx - 1]; 3647 auto *PartPtrTy = 3648 PLoad->getType()->getPointerTo(SI->getPointerAddressSpace()); 3649 3650 auto AS = SI->getPointerAddressSpace(); 3651 StoreInst *PStore = IRB.CreateAlignedStore( 3652 PLoad, 3653 getAdjustedPtr(IRB, DL, StoreBasePtr, 3654 APInt(DL.getPointerSizeInBits(AS), PartOffset), 3655 PartPtrTy, StoreBasePtr->getName() + "."), 3656 getAdjustedAlignment(SI, PartOffset, DL), /*IsVolatile*/ false); 3657 PStore->copyMetadata(*LI, LLVMContext::MD_mem_parallel_loop_access); 3658 DEBUG(dbgs() << " +" << PartOffset << ":" << *PStore << "\n"); 3659 } 3660 3661 // We want to immediately iterate on any allocas impacted by splitting 3662 // this store, and we have to track any promotable alloca (indicated by 3663 // a direct store) as needing to be resplit because it is no longer 3664 // promotable. 3665 if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(StoreBasePtr)) { 3666 ResplitPromotableAllocas.insert(OtherAI); 3667 Worklist.insert(OtherAI); 3668 } else if (AllocaInst *OtherAI = dyn_cast<AllocaInst>( 3669 StoreBasePtr->stripInBoundsOffsets())) { 3670 Worklist.insert(OtherAI); 3671 } 3672 3673 // Mark the original store as dead. 3674 DeadInsts.insert(SI); 3675 } 3676 3677 // Save the split loads if there are deferred stores among the users. 3678 if (DeferredStores) 3679 SplitLoadsMap.insert(std::make_pair(LI, std::move(SplitLoads))); 3680 3681 // Mark the original load as dead and kill the original slice. 3682 DeadInsts.insert(LI); 3683 Offsets.S->kill(); 3684 } 3685 3686 // Second, we rewrite all of the split stores. At this point, we know that 3687 // all loads from this alloca have been split already. For stores of such 3688 // loads, we can simply look up the pre-existing split loads. For stores of 3689 // other loads, we split those loads first and then write split stores of 3690 // them. 3691 for (StoreInst *SI : Stores) { 3692 auto *LI = cast<LoadInst>(SI->getValueOperand()); 3693 IntegerType *Ty = cast<IntegerType>(LI->getType()); 3694 uint64_t StoreSize = Ty->getBitWidth() / 8; 3695 assert(StoreSize > 0 && "Cannot have a zero-sized integer store!"); 3696 3697 auto &Offsets = SplitOffsetsMap[SI]; 3698 assert(StoreSize == Offsets.S->endOffset() - Offsets.S->beginOffset() && 3699 "Slice size should always match load size exactly!"); 3700 uint64_t BaseOffset = Offsets.S->beginOffset(); 3701 assert(BaseOffset + StoreSize > BaseOffset && 3702 "Cannot represent alloca access size using 64-bit integers!"); 3703 3704 Value *LoadBasePtr = LI->getPointerOperand(); 3705 Instruction *StoreBasePtr = cast<Instruction>(SI->getPointerOperand()); 3706 3707 DEBUG(dbgs() << " Splitting store: " << *SI << "\n"); 3708 3709 // Check whether we have an already split load. 
3710 auto SplitLoadsMapI = SplitLoadsMap.find(LI); 3711 std::vector<LoadInst *> *SplitLoads = nullptr; 3712 if (SplitLoadsMapI != SplitLoadsMap.end()) { 3713 SplitLoads = &SplitLoadsMapI->second; 3714 assert(SplitLoads->size() == Offsets.Splits.size() + 1 && 3715 "Too few split loads for the number of splits in the store!"); 3716 } else { 3717 DEBUG(dbgs() << " of load: " << *LI << "\n"); 3718 } 3719 3720 uint64_t PartOffset = 0, PartSize = Offsets.Splits.front(); 3721 int Idx = 0, Size = Offsets.Splits.size(); 3722 for (;;) { 3723 auto *PartTy = Type::getIntNTy(Ty->getContext(), PartSize * 8); 3724 auto *LoadPartPtrTy = PartTy->getPointerTo(LI->getPointerAddressSpace()); 3725 auto *StorePartPtrTy = PartTy->getPointerTo(SI->getPointerAddressSpace()); 3726 3727 // Either lookup a split load or create one. 3728 LoadInst *PLoad; 3729 if (SplitLoads) { 3730 PLoad = (*SplitLoads)[Idx]; 3731 } else { 3732 IRB.SetInsertPoint(LI); 3733 auto AS = LI->getPointerAddressSpace(); 3734 PLoad = IRB.CreateAlignedLoad( 3735 getAdjustedPtr(IRB, DL, LoadBasePtr, 3736 APInt(DL.getPointerSizeInBits(AS), PartOffset), 3737 LoadPartPtrTy, LoadBasePtr->getName() + "."), 3738 getAdjustedAlignment(LI, PartOffset, DL), /*IsVolatile*/ false, 3739 LI->getName()); 3740 } 3741 3742 // And store this partition. 3743 IRB.SetInsertPoint(SI); 3744 auto AS = SI->getPointerAddressSpace(); 3745 StoreInst *PStore = IRB.CreateAlignedStore( 3746 PLoad, 3747 getAdjustedPtr(IRB, DL, StoreBasePtr, 3748 APInt(DL.getPointerSizeInBits(AS), PartOffset), 3749 StorePartPtrTy, StoreBasePtr->getName() + "."), 3750 getAdjustedAlignment(SI, PartOffset, DL), /*IsVolatile*/ false); 3751 3752 // Now build a new slice for the alloca. 3753 NewSlices.push_back( 3754 Slice(BaseOffset + PartOffset, BaseOffset + PartOffset + PartSize, 3755 &PStore->getOperandUse(PStore->getPointerOperandIndex()), 3756 /*IsSplittable*/ false)); 3757 DEBUG(dbgs() << " new slice [" << NewSlices.back().beginOffset() 3758 << ", " << NewSlices.back().endOffset() << "): " << *PStore 3759 << "\n"); 3760 if (!SplitLoads) { 3761 DEBUG(dbgs() << " of split load: " << *PLoad << "\n"); 3762 } 3763 3764 // See if we've finished all the splits. 3765 if (Idx >= Size) 3766 break; 3767 3768 // Setup the next partition. 3769 PartOffset = Offsets.Splits[Idx]; 3770 ++Idx; 3771 PartSize = (Idx < Size ? Offsets.Splits[Idx] : StoreSize) - PartOffset; 3772 } 3773 3774 // We want to immediately iterate on any allocas impacted by splitting 3775 // this load, which is only relevant if it isn't a load of this alloca and 3776 // thus we didn't already split the loads above. We also have to keep track 3777 // of any promotable allocas we split loads on as they can no longer be 3778 // promoted. 3779 if (!SplitLoads) { 3780 if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(LoadBasePtr)) { 3781 assert(OtherAI != &AI && "We can't re-split our own alloca!"); 3782 ResplitPromotableAllocas.insert(OtherAI); 3783 Worklist.insert(OtherAI); 3784 } else if (AllocaInst *OtherAI = dyn_cast<AllocaInst>( 3785 LoadBasePtr->stripInBoundsOffsets())) { 3786 assert(OtherAI != &AI && "We can't re-split our own alloca!"); 3787 Worklist.insert(OtherAI); 3788 } 3789 } 3790 3791 // Mark the original store as dead now that we've split it up and kill its 3792 // slice. Note that we leave the original load in place unless this store 3793 // was its only use. It may in turn be split up if it is an alloca load 3794 // for some other alloca, but it may be a normal load. 
This may introduce
    // redundant loads, but where those can be merged the rest of the optimizer
    // should handle the merging, and this uncovers SSA splits, which is more
    // important. In practice, the original loads will almost always be fully
    // split and removed eventually, and the splits will be merged by any
    // trivial CSE, including instcombine.
    if (LI->hasOneUse()) {
      assert(*LI->user_begin() == SI && "Single use isn't this store!");
      DeadInsts.insert(LI);
    }
    DeadInsts.insert(SI);
    Offsets.S->kill();
  }

  // Remove the killed slices that have been pre-split.
  AS.erase(remove_if(AS, [](const Slice &S) { return S.isDead(); }), AS.end());

  // Insert our new slices. This will sort and merge them into the sorted
  // sequence.
  AS.insert(NewSlices);

  DEBUG(dbgs() << "  Pre-split slices:\n");
#ifndef NDEBUG
  for (auto I = AS.begin(), E = AS.end(); I != E; ++I)
    DEBUG(AS.print(dbgs(), I, "    "));
#endif

  // Finally, don't try to promote any allocas that now require re-splitting.
  // They have already been added to the worklist above.
  PromotableAllocas.erase(
      remove_if(
          PromotableAllocas,
          [&](AllocaInst *AI) { return ResplitPromotableAllocas.count(AI); }),
      PromotableAllocas.end());

  return true;
}

/// \brief Rewrite an alloca partition's users.
///
/// This routine drives both of the rewriting goals of the SROA pass. It tries
/// to rewrite uses of an alloca partition to be conducive to SSA value
/// promotion. If the partition needs a new, more refined alloca, this will
/// build that new alloca, preserving as much type information as possible, and
/// rewrite the uses of the old alloca to point at the new one and have the
/// appropriate new offsets. It also evaluates how successful the rewrite was
/// at enabling promotion and if it was successful queues the alloca to be
/// promoted.
AllocaInst *SROA::rewritePartition(AllocaInst &AI, AllocaSlices &AS,
                                   Partition &P) {
  // Try to compute a friendly type for this partition of the alloca. This
  // won't always succeed, in which case we fall back to a legal integer type
  // or an i8 array of an appropriate size.
  Type *SliceTy = nullptr;
  const DataLayout &DL = AI.getModule()->getDataLayout();
  if (Type *CommonUseTy = findCommonType(P.begin(), P.end(), P.endOffset()))
    if (DL.getTypeAllocSize(CommonUseTy) >= P.size())
      SliceTy = CommonUseTy;
  if (!SliceTy)
    if (Type *TypePartitionTy = getTypePartition(DL, AI.getAllocatedType(),
                                                 P.beginOffset(), P.size()))
      SliceTy = TypePartitionTy;
  if ((!SliceTy || (SliceTy->isArrayTy() &&
                    SliceTy->getArrayElementType()->isIntegerTy())) &&
      DL.isLegalInteger(P.size() * 8))
    SliceTy = Type::getIntNTy(*C, P.size() * 8);
  if (!SliceTy)
    SliceTy = ArrayType::get(Type::getInt8Ty(*C), P.size());
  assert(DL.getTypeAllocSize(SliceTy) >= P.size());

  bool IsIntegerPromotable = isIntegerWideningViable(P, SliceTy, DL);

  VectorType *VecTy =
      IsIntegerPromotable ? nullptr : isVectorPromotionViable(P, DL);
  if (VecTy)
    SliceTy = VecTy;

  // Check for the case where we're going to rewrite to a new alloca of the
  // exact same type as the original, and with the same access offsets.
  // In that case, re-use the existing alloca, but still run through the
  // rewriter to perform phi and select speculation.
  AllocaInst *NewAI;
  if (SliceTy == AI.getAllocatedType()) {
    assert(P.beginOffset() == 0 &&
           "Non-zero begin offset but same alloca type");
    NewAI = &AI;
    // FIXME: We should be able to bail at this point with "nothing changed".
    // FIXME: We might want to defer PHI speculation until after here.
    // FIXME: return nullptr;
  } else {
    unsigned Alignment = AI.getAlignment();
    if (!Alignment) {
      // The minimum alignment which users can rely on when the explicit
      // alignment is omitted or zero is that required by the ABI for this
      // type.
      Alignment = DL.getABITypeAlignment(AI.getAllocatedType());
    }
    Alignment = MinAlign(Alignment, P.beginOffset());
    // If we will get at least this much alignment from the type alone, leave
    // the alloca's alignment unconstrained.
    if (Alignment <= DL.getABITypeAlignment(SliceTy))
      Alignment = 0;
    NewAI = new AllocaInst(
        SliceTy, AI.getType()->getAddressSpace(), nullptr, Alignment,
        AI.getName() + ".sroa." + Twine(P.begin() - AS.begin()), &AI);
    ++NumNewAllocas;
  }

  DEBUG(dbgs() << "Rewriting alloca partition "
               << "[" << P.beginOffset() << "," << P.endOffset()
               << ") to: " << *NewAI << "\n");

  // Track the high watermark on the worklist as it is only relevant for
  // promoted allocas. We will reset it to this point if the alloca is not in
  // fact scheduled for promotion.
  unsigned PPWOldSize = PostPromotionWorklist.size();
  unsigned NumUses = 0;
  SmallSetVector<PHINode *, 8> PHIUsers;
  SmallSetVector<SelectInst *, 8> SelectUsers;

  AllocaSliceRewriter Rewriter(DL, AS, *this, AI, *NewAI, P.beginOffset(),
                               P.endOffset(), IsIntegerPromotable, VecTy,
                               PHIUsers, SelectUsers);
  bool Promotable = true;
  for (Slice *S : P.splitSliceTails()) {
    Promotable &= Rewriter.visit(S);
    ++NumUses;
  }
  for (Slice &S : P) {
    Promotable &= Rewriter.visit(&S);
    ++NumUses;
  }

  NumAllocaPartitionUses += NumUses;
  MaxUsesPerAllocaPartition.updateMax(NumUses);

  // Now that we've processed all the slices in the new partition, check if any
  // PHIs or Selects would block promotion.
  for (PHINode *PHI : PHIUsers)
    if (!isSafePHIToSpeculate(*PHI)) {
      Promotable = false;
      PHIUsers.clear();
      SelectUsers.clear();
      break;
    }

  for (SelectInst *Sel : SelectUsers)
    if (!isSafeSelectToSpeculate(*Sel)) {
      Promotable = false;
      PHIUsers.clear();
      SelectUsers.clear();
      break;
    }

  if (Promotable) {
    if (PHIUsers.empty() && SelectUsers.empty()) {
      // Promote the alloca.
      PromotableAllocas.push_back(NewAI);
    } else {
      // If we have either PHIs or Selects to speculate, add them to those
      // worklists and re-queue the new alloca so that we promote it on the
      // next iteration.
      for (PHINode *PHIUser : PHIUsers)
        SpeculatablePHIs.insert(PHIUser);
      for (SelectInst *SelectUser : SelectUsers)
        SpeculatableSelects.insert(SelectUser);
      Worklist.insert(NewAI);
    }
  } else {
    // Drop any post-promotion work items if promotion didn't happen.
3964 while (PostPromotionWorklist.size() > PPWOldSize) 3965 PostPromotionWorklist.pop_back(); 3966 3967 // We couldn't promote and we didn't create a new partition, nothing 3968 // happened. 3969 if (NewAI == &AI) 3970 return nullptr; 3971 3972 // If we can't promote the alloca, iterate on it to check for new 3973 // refinements exposed by splitting the current alloca. Don't iterate on an 3974 // alloca which didn't actually change and didn't get promoted. 3975 Worklist.insert(NewAI); 3976 } 3977 3978 return NewAI; 3979 } 3980 3981 /// \brief Walks the slices of an alloca and form partitions based on them, 3982 /// rewriting each of their uses. 3983 bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &AS) { 3984 if (AS.begin() == AS.end()) 3985 return false; 3986 3987 unsigned NumPartitions = 0; 3988 bool Changed = false; 3989 const DataLayout &DL = AI.getModule()->getDataLayout(); 3990 3991 // First try to pre-split loads and stores. 3992 Changed |= presplitLoadsAndStores(AI, AS); 3993 3994 // Now that we have identified any pre-splitting opportunities, mark any 3995 // splittable (non-whole-alloca) loads and stores as unsplittable. If we fail 3996 // to split these during pre-splitting, we want to force them to be 3997 // rewritten into a partition. 3998 bool IsSorted = true; 3999 for (Slice &S : AS) { 4000 if (!S.isSplittable()) 4001 continue; 4002 // FIXME: We currently leave whole-alloca splittable loads and stores. This 4003 // used to be the only splittable loads and stores and we need to be 4004 // confident that the above handling of splittable loads and stores is 4005 // completely sufficient before we forcibly disable the remaining handling. 4006 if (S.beginOffset() == 0 && 4007 S.endOffset() >= DL.getTypeAllocSize(AI.getAllocatedType())) 4008 continue; 4009 if (isa<LoadInst>(S.getUse()->getUser()) || 4010 isa<StoreInst>(S.getUse()->getUser())) { 4011 S.makeUnsplittable(); 4012 IsSorted = false; 4013 } 4014 } 4015 if (!IsSorted) 4016 std::sort(AS.begin(), AS.end()); 4017 4018 /// Describes the allocas introduced by rewritePartition in order to migrate 4019 /// the debug info. 4020 struct Fragment { 4021 AllocaInst *Alloca; 4022 uint64_t Offset; 4023 uint64_t Size; 4024 Fragment(AllocaInst *AI, uint64_t O, uint64_t S) 4025 : Alloca(AI), Offset(O), Size(S) {} 4026 }; 4027 SmallVector<Fragment, 4> Fragments; 4028 4029 // Rewrite each partition. 4030 for (auto &P : AS.partitions()) { 4031 if (AllocaInst *NewAI = rewritePartition(AI, AS, P)) { 4032 Changed = true; 4033 if (NewAI != &AI) { 4034 uint64_t SizeOfByte = 8; 4035 uint64_t AllocaSize = DL.getTypeSizeInBits(NewAI->getAllocatedType()); 4036 // Don't include any padding. 4037 uint64_t Size = std::min(AllocaSize, P.size() * SizeOfByte); 4038 Fragments.push_back(Fragment(NewAI, P.beginOffset() * SizeOfByte, Size)); 4039 } 4040 } 4041 ++NumPartitions; 4042 } 4043 4044 NumAllocaPartitions += NumPartitions; 4045 MaxPartitionsPerAlloca.updateMax(NumPartitions); 4046 4047 // Migrate debug information from the old alloca to the new alloca(s) 4048 // and the individual partitions. 4049 if (DbgDeclareInst *DbgDecl = FindAllocaDbgDeclare(&AI)) { 4050 auto *Var = DbgDecl->getVariable(); 4051 auto *Expr = DbgDecl->getExpression(); 4052 DIBuilder DIB(*AI.getModule(), /*AllowUnresolved*/ false); 4053 uint64_t AllocaSize = DL.getTypeSizeInBits(AI.getAllocatedType()); 4054 for (auto Fragment : Fragments) { 4055 // Create a fragment expression describing the new partition or reuse AI's 4056 // expression if there is only one partition. 
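      // For instance (illustrative numbers only): a 12-byte alloca split into
      // three 4-byte partitions yields three dbg.declares whose expressions
      // describe fragments covering bits [0, 32), [32, 64), and [64, 96) of
      // the original variable.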
4057 auto *FragmentExpr = Expr; 4058 if (Fragment.Size < AllocaSize || Expr->isFragment()) { 4059 // If this alloca is already a scalar replacement of a larger aggregate, 4060 // Fragment.Offset describes the offset inside the scalar. 4061 auto ExprFragment = Expr->getFragmentInfo(); 4062 uint64_t Offset = ExprFragment ? ExprFragment->OffsetInBits : 0; 4063 uint64_t Start = Offset + Fragment.Offset; 4064 uint64_t Size = Fragment.Size; 4065 if (ExprFragment) { 4066 uint64_t AbsEnd = 4067 ExprFragment->OffsetInBits + ExprFragment->SizeInBits; 4068 if (Start >= AbsEnd) 4069 // No need to describe a SROAed padding. 4070 continue; 4071 Size = std::min(Size, AbsEnd - Start); 4072 } 4073 FragmentExpr = DIB.createFragmentExpression(Start, Size); 4074 } 4075 4076 // Remove any existing dbg.declare intrinsic describing the same alloca. 4077 if (DbgDeclareInst *OldDDI = FindAllocaDbgDeclare(Fragment.Alloca)) 4078 OldDDI->eraseFromParent(); 4079 4080 DIB.insertDeclare(Fragment.Alloca, Var, FragmentExpr, 4081 DbgDecl->getDebugLoc(), &AI); 4082 } 4083 } 4084 return Changed; 4085 } 4086 4087 /// \brief Clobber a use with undef, deleting the used value if it becomes dead. 4088 void SROA::clobberUse(Use &U) { 4089 Value *OldV = U; 4090 // Replace the use with an undef value. 4091 U = UndefValue::get(OldV->getType()); 4092 4093 // Check for this making an instruction dead. We have to garbage collect 4094 // all the dead instructions to ensure the uses of any alloca end up being 4095 // minimal. 4096 if (Instruction *OldI = dyn_cast<Instruction>(OldV)) 4097 if (isInstructionTriviallyDead(OldI)) { 4098 DeadInsts.insert(OldI); 4099 } 4100 } 4101 4102 /// \brief Analyze an alloca for SROA. 4103 /// 4104 /// This analyzes the alloca to ensure we can reason about it, builds 4105 /// the slices of the alloca, and then hands it off to be split and 4106 /// rewritten as needed. 4107 bool SROA::runOnAlloca(AllocaInst &AI) { 4108 DEBUG(dbgs() << "SROA alloca: " << AI << "\n"); 4109 ++NumAllocasAnalyzed; 4110 4111 // Special case dead allocas, as they're trivial. 4112 if (AI.use_empty()) { 4113 AI.eraseFromParent(); 4114 return true; 4115 } 4116 const DataLayout &DL = AI.getModule()->getDataLayout(); 4117 4118 // Skip alloca forms that this analysis can't handle. 4119 if (AI.isArrayAllocation() || !AI.getAllocatedType()->isSized() || 4120 DL.getTypeAllocSize(AI.getAllocatedType()) == 0) 4121 return false; 4122 4123 bool Changed = false; 4124 4125 // First, split any FCA loads and stores touching this alloca to promote 4126 // better splitting and promotion opportunities. 4127 AggLoadStoreRewriter AggRewriter; 4128 Changed |= AggRewriter.rewrite(AI); 4129 4130 // Build the slices using a recursive instruction-visiting builder. 4131 AllocaSlices AS(DL, AI); 4132 DEBUG(AS.print(dbgs())); 4133 if (AS.isEscaped()) 4134 return Changed; 4135 4136 // Delete all the dead users of this alloca before splitting and rewriting it. 4137 for (Instruction *DeadUser : AS.getDeadUsers()) { 4138 // Free up everything used by this instruction. 4139 for (Use &DeadOp : DeadUser->operands()) 4140 clobberUse(DeadOp); 4141 4142 // Now replace the uses of this instruction. 4143 DeadUser->replaceAllUsesWith(UndefValue::get(DeadUser->getType())); 4144 4145 // And mark it for deletion. 4146 DeadInsts.insert(DeadUser); 4147 Changed = true; 4148 } 4149 for (Use *DeadOp : AS.getDeadOperands()) { 4150 clobberUse(*DeadOp); 4151 Changed = true; 4152 } 4153 4154 // No slices to split. Leave the dead alloca for a later pass to clean up. 
4155 if (AS.begin() == AS.end()) 4156 return Changed; 4157 4158 Changed |= splitAlloca(AI, AS); 4159 4160 DEBUG(dbgs() << " Speculating PHIs\n"); 4161 while (!SpeculatablePHIs.empty()) 4162 speculatePHINodeLoads(*SpeculatablePHIs.pop_back_val()); 4163 4164 DEBUG(dbgs() << " Speculating Selects\n"); 4165 while (!SpeculatableSelects.empty()) 4166 speculateSelectInstLoads(*SpeculatableSelects.pop_back_val()); 4167 4168 return Changed; 4169 } 4170 4171 /// \brief Delete the dead instructions accumulated in this run. 4172 /// 4173 /// Recursively deletes the dead instructions we've accumulated. This is done 4174 /// at the very end to maximize locality of the recursive delete and to 4175 /// minimize the problems of invalidated instruction pointers as such pointers 4176 /// are used heavily in the intermediate stages of the algorithm. 4177 /// 4178 /// We also record the alloca instructions deleted here so that they aren't 4179 /// subsequently handed to mem2reg to promote. 4180 void SROA::deleteDeadInstructions( 4181 SmallPtrSetImpl<AllocaInst *> &DeletedAllocas) { 4182 while (!DeadInsts.empty()) { 4183 Instruction *I = DeadInsts.pop_back_val(); 4184 DEBUG(dbgs() << "Deleting dead instruction: " << *I << "\n"); 4185 4186 I->replaceAllUsesWith(UndefValue::get(I->getType())); 4187 4188 for (Use &Operand : I->operands()) 4189 if (Instruction *U = dyn_cast<Instruction>(Operand)) { 4190 // Zero out the operand and see if it becomes trivially dead. 4191 Operand = nullptr; 4192 if (isInstructionTriviallyDead(U)) 4193 DeadInsts.insert(U); 4194 } 4195 4196 if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) { 4197 DeletedAllocas.insert(AI); 4198 if (DbgDeclareInst *DbgDecl = FindAllocaDbgDeclare(AI)) 4199 DbgDecl->eraseFromParent(); 4200 } 4201 4202 ++NumDeleted; 4203 I->eraseFromParent(); 4204 } 4205 } 4206 4207 /// \brief Promote the allocas, using the best available technique. 4208 /// 4209 /// This attempts to promote whatever allocas have been identified as viable in 4210 /// the PromotableAllocas list. If that list is empty, there is nothing to do. 4211 /// This function returns whether any promotion occurred. 4212 bool SROA::promoteAllocas(Function &F) { 4213 if (PromotableAllocas.empty()) 4214 return false; 4215 4216 NumPromoted += PromotableAllocas.size(); 4217 4218 DEBUG(dbgs() << "Promoting allocas with mem2reg...\n"); 4219 PromoteMemToReg(PromotableAllocas, *DT, AC); 4220 PromotableAllocas.clear(); 4221 return true; 4222 } 4223 4224 PreservedAnalyses SROA::runImpl(Function &F, DominatorTree &RunDT, 4225 AssumptionCache &RunAC) { 4226 DEBUG(dbgs() << "SROA function: " << F.getName() << "\n"); 4227 C = &F.getContext(); 4228 DT = &RunDT; 4229 AC = &RunAC; 4230 4231 BasicBlock &EntryBB = F.getEntryBlock(); 4232 for (BasicBlock::iterator I = EntryBB.begin(), E = std::prev(EntryBB.end()); 4233 I != E; ++I) { 4234 if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) 4235 Worklist.insert(AI); 4236 } 4237 4238 bool Changed = false; 4239 // A set of deleted alloca instruction pointers which should be removed from 4240 // the list of promotable allocas. 4241 SmallPtrSet<AllocaInst *, 4> DeletedAllocas; 4242 4243 do { 4244 while (!Worklist.empty()) { 4245 Changed |= runOnAlloca(*Worklist.pop_back_val()); 4246 deleteDeadInstructions(DeletedAllocas); 4247 4248 // Remove the deleted allocas from various lists so that we don't try to 4249 // continue processing them. 
4250 if (!DeletedAllocas.empty()) { 4251 auto IsInSet = [&](AllocaInst *AI) { return DeletedAllocas.count(AI); }; 4252 Worklist.remove_if(IsInSet); 4253 PostPromotionWorklist.remove_if(IsInSet); 4254 PromotableAllocas.erase(remove_if(PromotableAllocas, IsInSet), 4255 PromotableAllocas.end()); 4256 DeletedAllocas.clear(); 4257 } 4258 } 4259 4260 Changed |= promoteAllocas(F); 4261 4262 Worklist = PostPromotionWorklist; 4263 PostPromotionWorklist.clear(); 4264 } while (!Worklist.empty()); 4265 4266 if (!Changed) 4267 return PreservedAnalyses::all(); 4268 4269 PreservedAnalyses PA; 4270 PA.preserveSet<CFGAnalyses>(); 4271 PA.preserve<GlobalsAA>(); 4272 return PA; 4273 } 4274 4275 PreservedAnalyses SROA::run(Function &F, FunctionAnalysisManager &AM) { 4276 return runImpl(F, AM.getResult<DominatorTreeAnalysis>(F), 4277 AM.getResult<AssumptionAnalysis>(F)); 4278 } 4279 4280 /// A legacy pass for the legacy pass manager that wraps the \c SROA pass. 4281 /// 4282 /// This is in the llvm namespace purely to allow it to be a friend of the \c 4283 /// SROA pass. 4284 class llvm::sroa::SROALegacyPass : public FunctionPass { 4285 /// The SROA implementation. 4286 SROA Impl; 4287 4288 public: 4289 SROALegacyPass() : FunctionPass(ID) { 4290 initializeSROALegacyPassPass(*PassRegistry::getPassRegistry()); 4291 } 4292 bool runOnFunction(Function &F) override { 4293 if (skipFunction(F)) 4294 return false; 4295 4296 auto PA = Impl.runImpl( 4297 F, getAnalysis<DominatorTreeWrapperPass>().getDomTree(), 4298 getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F)); 4299 return !PA.areAllPreserved(); 4300 } 4301 void getAnalysisUsage(AnalysisUsage &AU) const override { 4302 AU.addRequired<AssumptionCacheTracker>(); 4303 AU.addRequired<DominatorTreeWrapperPass>(); 4304 AU.addPreserved<GlobalsAAWrapperPass>(); 4305 AU.setPreservesCFG(); 4306 } 4307 4308 StringRef getPassName() const override { return "SROA"; } 4309 static char ID; 4310 }; 4311 4312 char SROALegacyPass::ID = 0; 4313 4314 FunctionPass *llvm::createSROAPass() { return new SROALegacyPass(); } 4315 4316 INITIALIZE_PASS_BEGIN(SROALegacyPass, "sroa", 4317 "Scalar Replacement Of Aggregates", false, false) 4318 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 4319 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 4320 INITIALIZE_PASS_END(SROALegacyPass, "sroa", "Scalar Replacement Of Aggregates", 4321 false, false) 4322