//===- SROA.cpp - Scalar Replacement Of Aggregates ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This transformation implements the well known scalar replacement of
/// aggregates transformation. It tries to identify promotable elements of an
/// aggregate alloca, and promote them to registers. It will also try to
/// convert uses of an element (or set of elements) of an alloca into a vector
/// or bitfield-style integer scalar if appropriate.
///
/// It works to do this with minimal slicing of the alloca so that regions
/// which are merely transferred in and out of external memory remain unchanged
/// and are not decomposed to scalar code.
///
/// Because this also performs alloca promotion, it can be thought of as also
/// serving the purpose of SSA formation. The algorithm iterates on the
/// function until all opportunities for promotion have been realized.
///
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/SROA.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/PtrUseVisitor.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantFolder.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>
#include <cassert>
#include <chrono>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

#ifndef NDEBUG
// We only use this for a debug check.
#include <random>
#endif

using namespace llvm;
using namespace llvm::sroa;

#define DEBUG_TYPE "sroa"

STATISTIC(NumAllocasAnalyzed, "Number of allocas analyzed for replacement");
STATISTIC(NumAllocaPartitions, "Number of alloca partitions formed");
STATISTIC(MaxPartitionsPerAlloca, "Maximum number of partitions per alloca");
STATISTIC(NumAllocaPartitionUses, "Number of alloca partition uses rewritten");
STATISTIC(MaxUsesPerAllocaPartition, "Maximum number of uses of a partition");
STATISTIC(NumNewAllocas, "Number of new, smaller allocas introduced");
STATISTIC(NumPromoted, "Number of allocas promoted to SSA values");
STATISTIC(NumLoadsSpeculated, "Number of loads speculated to allow promotion");
STATISTIC(NumDeleted, "Number of instructions deleted");
STATISTIC(NumVectorized, "Number of vectorized aggregates");

/// Hidden option to enable randomly shuffling the slices to help uncover
/// instability in their order.
static cl::opt<bool> SROARandomShuffleSlices("sroa-random-shuffle-slices",
                                             cl::init(false), cl::Hidden);

/// Hidden option to experiment with completely strict handling of inbounds
/// GEPs.
static cl::opt<bool> SROAStrictInbounds("sroa-strict-inbounds", cl::init(false),
                                        cl::Hidden);

namespace {

/// A custom IRBuilder inserter which prefixes all names, but only in
/// Assert builds.
class IRBuilderPrefixedInserter : public IRBuilderDefaultInserter {
  std::string Prefix;

  const Twine getNameWithPrefix(const Twine &Name) const {
    return Name.isTriviallyEmpty() ? Name : Prefix + Name;
  }

public:
  void SetNamePrefix(const Twine &P) { Prefix = P.str(); }

protected:
  void InsertHelper(Instruction *I, const Twine &Name, BasicBlock *BB,
                    BasicBlock::iterator InsertPt) const {
    IRBuilderDefaultInserter::InsertHelper(I, getNameWithPrefix(Name), BB,
                                           InsertPt);
  }
};

/// Provide a type for IRBuilder that drops names in release builds.
using IRBuilderTy = IRBuilder<ConstantFolder, IRBuilderPrefixedInserter>;

/// A used slice of an alloca.
///
/// This structure represents a slice of an alloca used by some instruction. It
/// stores both the begin and end offsets of this use, a pointer to the use
/// itself, and a flag indicating whether we can classify the use as splittable
/// or not when forming partitions of the alloca.
class Slice {
  /// The beginning offset of the range.
  uint64_t BeginOffset = 0;

  /// The ending offset, not included in the range.
  uint64_t EndOffset = 0;

  /// Storage for both the use of this slice and whether it can be
  /// split.
  PointerIntPair<Use *, 1, bool> UseAndIsSplittable;

public:
  Slice() = default;

  Slice(uint64_t BeginOffset, uint64_t EndOffset, Use *U, bool IsSplittable)
      : BeginOffset(BeginOffset), EndOffset(EndOffset),
        UseAndIsSplittable(U, IsSplittable) {}

  uint64_t beginOffset() const { return BeginOffset; }
  uint64_t endOffset() const { return EndOffset; }

  bool isSplittable() const { return UseAndIsSplittable.getInt(); }
  void makeUnsplittable() { UseAndIsSplittable.setInt(false); }

  Use *getUse() const { return UseAndIsSplittable.getPointer(); }

  bool isDead() const { return getUse() == nullptr; }
  void kill() { UseAndIsSplittable.setPointer(nullptr); }

  /// Support for ordering ranges.
  ///
  /// This provides an ordering over ranges such that start offsets are
  /// always increasing, and within equal start offsets, the end offsets are
  /// decreasing. Thus the spanning range comes first in a cluster with the
  /// same start position.
  bool operator<(const Slice &RHS) const {
    if (beginOffset() < RHS.beginOffset())
      return true;
    if (beginOffset() > RHS.beginOffset())
      return false;
    if (isSplittable() != RHS.isSplittable())
      return !isSplittable();
    if (endOffset() > RHS.endOffset())
      return true;
    return false;
  }
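
  // For example, with identical splittability the slices [0,16), [8,16), and
  // [0,8) sort as [0,16), [0,8), [8,16): the two slices starting at offset 0
  // cluster together, and the spanning [0,16) slice leads that cluster.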

  /// Support comparison with a single offset to allow binary searches.
  friend LLVM_ATTRIBUTE_UNUSED bool operator<(const Slice &LHS,
                                              uint64_t RHSOffset) {
    return LHS.beginOffset() < RHSOffset;
  }
  friend LLVM_ATTRIBUTE_UNUSED bool operator<(uint64_t LHSOffset,
                                              const Slice &RHS) {
    return LHSOffset < RHS.beginOffset();
  }

  bool operator==(const Slice &RHS) const {
    return isSplittable() == RHS.isSplittable() &&
           beginOffset() == RHS.beginOffset() && endOffset() == RHS.endOffset();
  }
  bool operator!=(const Slice &RHS) const { return !operator==(RHS); }
};

} // end anonymous namespace

namespace llvm {

template <typename T> struct isPodLike;
template <> struct isPodLike<Slice> { static const bool value = true; };

} // end namespace llvm

/// Representation of the alloca slices.
///
/// This class represents the slices of an alloca which are formed by its
/// various uses. If a pointer escapes, we can't fully build a representation
/// for the slices used and we reflect that in this structure. The uses are
/// stored, sorted by increasing beginning offset and with unsplittable slices
/// starting at a particular offset before splittable slices.
class llvm::sroa::AllocaSlices {
public:
  /// Construct the slices of a particular alloca.
  AllocaSlices(const DataLayout &DL, AllocaInst &AI);

  /// Test whether a pointer to the allocation escapes our analysis.
  ///
  /// If this is true, the slices are never fully built and should be
  /// ignored.
  bool isEscaped() const { return PointerEscapingInstr; }

  /// Support for iterating over the slices.
  /// @{
  using iterator = SmallVectorImpl<Slice>::iterator;
  using range = iterator_range<iterator>;

  iterator begin() { return Slices.begin(); }
  iterator end() { return Slices.end(); }

  using const_iterator = SmallVectorImpl<Slice>::const_iterator;
  using const_range = iterator_range<const_iterator>;

  const_iterator begin() const { return Slices.begin(); }
  const_iterator end() const { return Slices.end(); }
  /// @}

  /// Erase a range of slices.
  void erase(iterator Start, iterator Stop) { Slices.erase(Start, Stop); }

  /// Insert new slices for this alloca.
  ///
  /// This moves the slices into the alloca's slices collection, and re-sorts
  /// everything so that the usual ordering properties of the alloca's slices
  /// hold.
  void insert(ArrayRef<Slice> NewSlices) {
    int OldSize = Slices.size();
    Slices.append(NewSlices.begin(), NewSlices.end());
    auto SliceI = Slices.begin() + OldSize;
    llvm::sort(SliceI, Slices.end());
    std::inplace_merge(Slices.begin(), SliceI, Slices.end());
  }
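
  // Note that only the newly appended tail needs a full sort here: the
  // existing slices are already ordered, so a single std::inplace_merge
  // restores the global ordering without re-sorting the whole vector.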

  // Forward declare the iterator and range accessor for walking the
  // partitions.
  class partition_iterator;
  iterator_range<partition_iterator> partitions();

  /// Access the dead users for this alloca.
  ArrayRef<Instruction *> getDeadUsers() const { return DeadUsers; }

  /// Access the dead operands referring to this alloca.
  ///
  /// These are operands which cannot actually be used to refer to the
  /// alloca as they are outside its range and the user doesn't correct for
  /// that. These mostly consist of PHI node inputs and the like which we just
  /// need to replace with undef.
  ArrayRef<Use *> getDeadOperands() const { return DeadOperands; }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void print(raw_ostream &OS, const_iterator I, StringRef Indent = "  ") const;
  void printSlice(raw_ostream &OS, const_iterator I,
                  StringRef Indent = "  ") const;
  void printUse(raw_ostream &OS, const_iterator I,
                StringRef Indent = "  ") const;
  void print(raw_ostream &OS) const;
  void dump(const_iterator I) const;
  void dump() const;
#endif

private:
  template <typename DerivedT, typename RetT = void> class BuilderBase;
  class SliceBuilder;

  friend class AllocaSlices::SliceBuilder;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Handle to alloca instruction to simplify method interfaces.
  AllocaInst &AI;
#endif

  /// The instruction responsible for this alloca not having a known set
  /// of slices.
  ///
  /// When an instruction (potentially) escapes the pointer to the alloca, we
  /// store a pointer to that here and abort trying to form slices of the
  /// alloca. This will be null if the alloca slices are analyzed successfully.
  Instruction *PointerEscapingInstr;

  /// The slices of the alloca.
  ///
  /// We store a vector of the slices formed by uses of the alloca here. This
  /// vector is sorted by increasing begin offset, and then the unsplittable
  /// slices before the splittable ones. See the Slice inner class for more
  /// details.
  SmallVector<Slice, 8> Slices;

  /// Instructions which will become dead if we rewrite the alloca.
  ///
  /// Note that these are not separated by slice. This is because we expect an
  /// alloca to be completely rewritten or not rewritten at all. If rewritten,
  /// all these instructions can simply be removed and replaced with undef as
  /// they come from outside of the allocated space.
  SmallVector<Instruction *, 8> DeadUsers;

  /// Operands which will become dead if we rewrite the alloca.
  ///
  /// These are operands that in their particular use can be replaced with
  /// undef when we rewrite the alloca. These show up in out-of-bounds inputs
  /// to PHI nodes and the like. They aren't entirely dead (there might be
  /// a GEP back into the bounds using it elsewhere) and nor is the PHI, but we
  /// want to swap this particular input for undef to simplify the use lists of
  /// the alloca.
  SmallVector<Use *, 8> DeadOperands;
};

/// A partition of the slices.
///
/// An ephemeral representation for a range of slices which can be viewed as
/// a partition of the alloca. This range represents a span of the alloca's
/// memory which cannot be split, and provides access to all of the slices
/// overlapping some part of the partition.
///
/// Objects of this type are produced by traversing the alloca's slices, but
/// are only ephemeral and not persistent.
class llvm::sroa::Partition {
private:
  friend class AllocaSlices;
  friend class AllocaSlices::partition_iterator;

  using iterator = AllocaSlices::iterator;

  /// The beginning and ending offsets of the alloca for this
  /// partition.
  uint64_t BeginOffset, EndOffset;

  /// The start and end iterators of this partition.
  iterator SI, SJ;

  /// A collection of split slice tails overlapping the partition.
  SmallVector<Slice *, 4> SplitTails;

  /// Raw constructor builds an empty partition starting and ending at
  /// the given iterator.
  Partition(iterator SI) : SI(SI), SJ(SI) {}

public:
  /// The start offset of this partition.
  ///
  /// All of the contained slices start at or after this offset.
  uint64_t beginOffset() const { return BeginOffset; }

  /// The end offset of this partition.
  ///
  /// All of the contained slices end at or before this offset.
  uint64_t endOffset() const { return EndOffset; }

  /// The size of the partition.
  ///
  /// Note that this can never be zero.
  uint64_t size() const {
    assert(BeginOffset < EndOffset && "Partitions must span some bytes!");
    return EndOffset - BeginOffset;
  }

  /// Test whether this partition contains no slices, and merely spans
  /// a region occupied by split slices.
  bool empty() const { return SI == SJ; }

  /// \name Iterate slices that start within the partition.
  /// These may be splittable or unsplittable. They have a begin offset >= the
  /// partition begin offset.
  /// @{
  // FIXME: We should probably define a "concat_iterator" helper and use that
  // to stitch together pointee_iterators over the split tails and the
  // contiguous iterators of the partition. That would give a much nicer
  // interface here. We could then additionally expose filtered iterators for
  // split, unsplit, and unsplittable slices based on the usage patterns.
  iterator begin() const { return SI; }
  iterator end() const { return SJ; }
  /// @}

  /// Get the sequence of split slice tails.
  ///
  /// These tails are of slices which start before this partition but are
  /// split and overlap into the partition. We accumulate these while forming
  /// partitions.
  ArrayRef<Slice *> splitSliceTails() const { return SplitTails; }
};

/// An iterator over partitions of the alloca's slices.
///
/// This iterator implements the core algorithm for partitioning the alloca's
/// slices. It is a forward iterator as we don't support backtracking for
/// efficiency reasons, and re-use a single storage area to maintain the
/// current set of split slices.
///
/// It is templated on the slice iterator type to use so that it can operate
/// with either const or non-const slice iterators.
class AllocaSlices::partition_iterator
    : public iterator_facade_base<partition_iterator, std::forward_iterator_tag,
                                  Partition> {
  friend class AllocaSlices;

  /// Most of the state for walking the partitions is held in a class
  /// with a nice interface for examining them.
  Partition P;

  /// We need to keep the end of the slices to know when to stop.
  AllocaSlices::iterator SE;

  /// We also need to keep track of the maximum split end offset seen.
  /// FIXME: Do we really?
  uint64_t MaxSplitSliceEndOffset = 0;

  /// Sets the partition to be empty at given iterator, and sets the
  /// end iterator.
  partition_iterator(AllocaSlices::iterator SI, AllocaSlices::iterator SE)
      : P(SI), SE(SE) {
    // If not already at the end, advance our state to form the initial
    // partition.
    if (SI != SE)
      advance();
  }

  /// Advance the iterator to the next partition.
  ///
  /// Requires that the iterator not be at the end of the slices.
  void advance() {
    assert((P.SI != SE || !P.SplitTails.empty()) &&
           "Cannot advance past the end of the slices!");

    // Clear out any split uses which have ended.
    if (!P.SplitTails.empty()) {
      if (P.EndOffset >= MaxSplitSliceEndOffset) {
        // If we've finished all splits, this is easy.
        P.SplitTails.clear();
        MaxSplitSliceEndOffset = 0;
      } else {
        // Remove the uses which have ended in the prior partition. This
        // cannot change the max split slice end because we just checked that
        // the prior partition ended prior to that max.
        P.SplitTails.erase(llvm::remove_if(P.SplitTails,
                                           [&](Slice *S) {
                                             return S->endOffset() <=
                                                    P.EndOffset;
                                           }),
                           P.SplitTails.end());
        assert(llvm::any_of(P.SplitTails,
                            [&](Slice *S) {
                              return S->endOffset() == MaxSplitSliceEndOffset;
                            }) &&
               "Could not find the current max split slice offset!");
        assert(llvm::all_of(P.SplitTails,
                            [&](Slice *S) {
                              return S->endOffset() <= MaxSplitSliceEndOffset;
                            }) &&
               "Max split slice end offset is not actually the max!");
      }
    }

    // If P.SI is already at the end, then we've cleared the split tail and
    // now have an end iterator.
    if (P.SI == SE) {
      assert(P.SplitTails.empty() && "Failed to clear the split slices!");
      return;
    }

    // If we had a non-empty partition previously, set up the state for
    // subsequent partitions.
    if (P.SI != P.SJ) {
      // Accumulate all the splittable slices which started in the old
      // partition into the split list.
      for (Slice &S : P)
        if (S.isSplittable() && S.endOffset() > P.EndOffset) {
          P.SplitTails.push_back(&S);
          MaxSplitSliceEndOffset =
              std::max(S.endOffset(), MaxSplitSliceEndOffset);
        }

      // Start from the end of the previous partition.
      P.SI = P.SJ;

      // If P.SI is now at the end, we at most have a tail of split slices.
      if (P.SI == SE) {
        P.BeginOffset = P.EndOffset;
        P.EndOffset = MaxSplitSliceEndOffset;
        return;
      }

      // If we have split slices and the next slice is after a gap and is
      // not splittable, immediately form an empty partition for the split
      // slices up until the next slice begins.
      if (!P.SplitTails.empty() && P.SI->beginOffset() != P.EndOffset &&
          !P.SI->isSplittable()) {
        P.BeginOffset = P.EndOffset;
        P.EndOffset = P.SI->beginOffset();
        return;
      }
    }

    // OK, we need to consume new slices. Set the end offset based on the
    // current slice, and step SJ past it. The beginning offset of the
    // partition is the beginning offset of the next slice unless we have
    // pre-existing split slices that are continuing, in which case we begin
    // at the prior end offset.
    P.BeginOffset = P.SplitTails.empty() ? P.SI->beginOffset() : P.EndOffset;
    P.EndOffset = P.SI->endOffset();
    ++P.SJ;

    // There are two strategies to form a partition based on whether the
    // partition starts with an unsplittable slice or a splittable slice.
    if (!P.SI->isSplittable()) {
      // When we're forming an unsplittable region, it must always start at
      // the first slice and will extend through its end.
      assert(P.BeginOffset == P.SI->beginOffset());

      // Form a partition including all of the overlapping slices with this
      // unsplittable slice.
      while (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset) {
        if (!P.SJ->isSplittable())
          P.EndOffset = std::max(P.EndOffset, P.SJ->endOffset());
        ++P.SJ;
      }

      // We have a partition across a set of overlapping unsplittable
      // partitions.
      return;
    }

    // If we're starting with a splittable slice, then we need to form
    // a synthetic partition spanning it and any other overlapping splittable
    // slices.
    assert(P.SI->isSplittable() && "Forming a splittable partition!");

    // Collect all of the overlapping splittable slices.
    while (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset &&
           P.SJ->isSplittable()) {
      P.EndOffset = std::max(P.EndOffset, P.SJ->endOffset());
      ++P.SJ;
    }

    // Back up P.EndOffset if we ended the span early when encountering an
    // unsplittable slice. This synthesizes the early end offset of
    // a partition spanning only splittable slices.
    if (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset) {
      assert(!P.SJ->isSplittable());
      P.EndOffset = P.SJ->beginOffset();
    }
  }

public:
  bool operator==(const partition_iterator &RHS) const {
    assert(SE == RHS.SE &&
           "End iterators don't match between compared partition iterators!");

    // The observed positions of partitions are marked by the P.SI iterator and
    // the emptiness of the split slices. The latter is only relevant when
    // P.SI == SE, as the end iterator will additionally have an empty split
    // slices list, but the prior may have the same P.SI and a tail of split
    // slices.
    if (P.SI == RHS.P.SI && P.SplitTails.empty() == RHS.P.SplitTails.empty()) {
      assert(P.SJ == RHS.P.SJ &&
             "Same set of slices formed two different sized partitions!");
      assert(P.SplitTails.size() == RHS.P.SplitTails.size() &&
             "Same slice position with differently sized non-empty split "
             "slice tails!");
      return true;
    }
    return false;
  }

  partition_iterator &operator++() {
    advance();
    return *this;
  }

  Partition &operator*() { return P; }
};

/// A forward range over the partitions of the alloca's slices.
///
/// This accesses an iterator range over the partitions of the alloca's
/// slices. It computes these partitions on the fly based on the overlapping
/// offsets of the slices and the ability to split them. It will visit "empty"
/// partitions to cover regions of the alloca only accessed via split
/// slices.
iterator_range<AllocaSlices::partition_iterator> AllocaSlices::partitions() {
  return make_range(partition_iterator(begin(), end()),
                    partition_iterator(end(), end()));
}
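
// Illustratively: given an unsplittable slice [0,8) and a splittable slice
// [0,16), this range yields two partitions. The first is [0,8) and contains
// both slices; the second is [8,16), which is empty() of slices of its own
// but carries the [0,16) slice in its splitSliceTails().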

static Value *foldSelectInst(SelectInst &SI) {
  // If the condition being selected on is a constant or the same value is
  // being selected between, fold the select. Yes this does (rarely) happen
  // early on.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(SI.getCondition()))
    return SI.getOperand(1 + CI->isZero());
  if (SI.getOperand(1) == SI.getOperand(2))
    return SI.getOperand(1);

  return nullptr;
}

/// A helper that folds a PHI node or a select.
static Value *foldPHINodeOrSelectInst(Instruction &I) {
  if (PHINode *PN = dyn_cast<PHINode>(&I)) {
    // If PN merges together the same value, return that value.
    return PN->hasConstantValue();
  }
  return foldSelectInst(cast<SelectInst>(I));
}

/// Builder for the alloca slices.
///
/// This class builds a set of alloca slices by recursively visiting the uses
/// of an alloca and making a slice for each load and store at each offset.
class AllocaSlices::SliceBuilder : public PtrUseVisitor<SliceBuilder> {
  friend class PtrUseVisitor<SliceBuilder>;
  friend class InstVisitor<SliceBuilder>;

  using Base = PtrUseVisitor<SliceBuilder>;

  const uint64_t AllocSize;
  AllocaSlices &AS;

  SmallDenseMap<Instruction *, unsigned> MemTransferSliceMap;
  SmallDenseMap<Instruction *, uint64_t> PHIOrSelectSizes;

  /// Set to de-duplicate dead instructions found in the use walk.
  SmallPtrSet<Instruction *, 4> VisitedDeadInsts;

public:
  SliceBuilder(const DataLayout &DL, AllocaInst &AI, AllocaSlices &AS)
      : PtrUseVisitor<SliceBuilder>(DL),
        AllocSize(DL.getTypeAllocSize(AI.getAllocatedType())), AS(AS) {}

private:
  void markAsDead(Instruction &I) {
    if (VisitedDeadInsts.insert(&I).second)
      AS.DeadUsers.push_back(&I);
  }

  void insertUse(Instruction &I, const APInt &Offset, uint64_t Size,
                 bool IsSplittable = false) {
    // Completely skip uses which have a zero size or start either before or
    // past the end of the allocation.
    if (Size == 0 || Offset.uge(AllocSize)) {
      LLVM_DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @"
                        << Offset
                        << " which has zero size or starts outside of the "
                        << AllocSize << " byte alloca:\n"
                        << "    alloca: " << AS.AI << "\n"
                        << "       use: " << I << "\n");
      return markAsDead(I);
    }

    uint64_t BeginOffset = Offset.getZExtValue();
    uint64_t EndOffset = BeginOffset + Size;

    // Clamp the end offset to the end of the allocation. Note that this is
    // formulated to handle even the case where "BeginOffset + Size" overflows.
    // This may appear superficially to be something we could ignore entirely,
    // but that is not so! There may be widened loads or PHI-node uses where
    // some instructions are dead but not others. We can't completely ignore
    // them, and so have to record at least the information here.
    assert(AllocSize >= BeginOffset); // Established above.
    if (Size > AllocSize - BeginOffset) {
      LLVM_DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @"
                        << Offset << " to remain within the " << AllocSize
                        << " byte alloca:\n"
                        << "    alloca: " << AS.AI << "\n"
                        << "       use: " << I << "\n");
      EndOffset = AllocSize;
    }

    AS.Slices.push_back(Slice(BeginOffset, EndOffset, U, IsSplittable));
  }
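
  // For example, an 8-byte use at offset 12 of a 16-byte alloca is recorded
  // as the clamped slice [12,16), while the same use at offset 16 or beyond
  // is skipped entirely and its instruction marked as dead.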

  void visitBitCastInst(BitCastInst &BC) {
    if (BC.use_empty())
      return markAsDead(BC);

    return Base::visitBitCastInst(BC);
  }

  void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
    if (GEPI.use_empty())
      return markAsDead(GEPI);

    if (SROAStrictInbounds && GEPI.isInBounds()) {
      // FIXME: This is a manually un-factored variant of the basic code inside
      // of GEPs with checking of the inbounds invariant specified in the
      // langref in a very strict sense. If we ever want to enable
      // SROAStrictInbounds, this code should be factored cleanly into
      // PtrUseVisitor, but it is easier to experiment with SROAStrictInbounds
      // by writing out the code here where we have the underlying allocation
      // size readily available.
      APInt GEPOffset = Offset;
      const DataLayout &DL = GEPI.getModule()->getDataLayout();
      for (gep_type_iterator GTI = gep_type_begin(GEPI),
                             GTE = gep_type_end(GEPI);
           GTI != GTE; ++GTI) {
        ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
        if (!OpC)
          break;

        // Handle a struct index, which adds its field offset to the pointer.
        if (StructType *STy = GTI.getStructTypeOrNull()) {
          unsigned ElementIdx = OpC->getZExtValue();
          const StructLayout *SL = DL.getStructLayout(STy);
          GEPOffset +=
              APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx));
        } else {
          // For array or vector indices, scale the index by the size of the
          // type.
          APInt Index = OpC->getValue().sextOrTrunc(Offset.getBitWidth());
          GEPOffset += Index * APInt(Offset.getBitWidth(),
                                     DL.getTypeAllocSize(GTI.getIndexedType()));
        }

        // If this index has computed an intermediate pointer which is not
        // inbounds, then the result of the GEP is a poison value and we can
        // delete it and all uses.
        if (GEPOffset.ugt(AllocSize))
          return markAsDead(GEPI);
      }
    }

    return Base::visitGetElementPtrInst(GEPI);
  }

  void handleLoadOrStore(Type *Ty, Instruction &I, const APInt &Offset,
                         uint64_t Size, bool IsVolatile) {
    // We allow splitting of non-volatile loads and stores where the type is an
    // integer type. These may be used to implement 'memcpy' or other "transfer
    // of bits" patterns.
    bool IsSplittable = Ty->isIntegerTy() && !IsVolatile;

    insertUse(I, Offset, Size, IsSplittable);
  }
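
  // So a plain i64 load or store yields a splittable slice, while a volatile
  // access, or a float, pointer, or vector access over the same bytes, yields
  // an unsplittable slice of the same range.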

  void visitLoadInst(LoadInst &LI) {
    assert((!LI.isSimple() || LI.getType()->isSingleValueType()) &&
           "All simple FCA loads should have been pre-split");

    if (!IsOffsetKnown)
      return PI.setAborted(&LI);

    const DataLayout &DL = LI.getModule()->getDataLayout();
    uint64_t Size = DL.getTypeStoreSize(LI.getType());
    return handleLoadOrStore(LI.getType(), LI, Offset, Size, LI.isVolatile());
  }

  void visitStoreInst(StoreInst &SI) {
    Value *ValOp = SI.getValueOperand();
    if (ValOp == *U)
      return PI.setEscapedAndAborted(&SI);
    if (!IsOffsetKnown)
      return PI.setAborted(&SI);

    const DataLayout &DL = SI.getModule()->getDataLayout();
    uint64_t Size = DL.getTypeStoreSize(ValOp->getType());

    // If this memory access can be shown to *statically* extend outside the
    // bounds of the allocation, its behavior is undefined, so simply
    // ignore it. Note that this is more strict than the generic clamping
    // behavior of insertUse. We also try to handle cases which might run the
    // risk of overflow.
    // FIXME: We should instead consider the pointer to have escaped if this
    // function is being instrumented for addressing bugs or race conditions.
    if (Size > AllocSize || Offset.ugt(AllocSize - Size)) {
      LLVM_DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte store @"
                        << Offset << " which extends past the end of the "
                        << AllocSize << " byte alloca:\n"
                        << "    alloca: " << AS.AI << "\n"
                        << "       use: " << SI << "\n");
      return markAsDead(SI);
    }

    assert((!SI.isSimple() || ValOp->getType()->isSingleValueType()) &&
           "All simple FCA stores should have been pre-split");
    handleLoadOrStore(ValOp->getType(), SI, Offset, Size, SI.isVolatile());
  }

  void visitMemSetInst(MemSetInst &II) {
    assert(II.getRawDest() == *U && "Pointer use is not the destination?");
    ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
    if ((Length && Length->getValue() == 0) ||
        (IsOffsetKnown && Offset.uge(AllocSize)))
      // Zero-length mem transfer intrinsics can be ignored entirely.
      return markAsDead(II);

    if (!IsOffsetKnown)
      return PI.setAborted(&II);

    insertUse(II, Offset, Length ? Length->getLimitedValue()
                                 : AllocSize - Offset.getLimitedValue(),
              (bool)Length);
  }

  void visitMemTransferInst(MemTransferInst &II) {
    ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
    if (Length && Length->getValue() == 0)
      // Zero-length mem transfer intrinsics can be ignored entirely.
      return markAsDead(II);

    // Because we can visit these intrinsics twice, also check to see if the
    // first time marked this instruction as dead. If so, skip it.
    if (VisitedDeadInsts.count(&II))
      return;

    if (!IsOffsetKnown)
      return PI.setAborted(&II);

    // This side of the transfer is completely out-of-bounds, and so we can
    // nuke the entire transfer. However, we also need to nuke the other side
    // if already added to our partitions.
    // FIXME: Yet another place we really should bypass this when
    // instrumenting for ASan.
    if (Offset.uge(AllocSize)) {
      SmallDenseMap<Instruction *, unsigned>::iterator MTPI =
          MemTransferSliceMap.find(&II);
      if (MTPI != MemTransferSliceMap.end())
        AS.Slices[MTPI->second].kill();
      return markAsDead(II);
    }

    uint64_t RawOffset = Offset.getLimitedValue();
    uint64_t Size = Length ? Length->getLimitedValue() : AllocSize - RawOffset;

    // Check for the special case where the same exact value is used for both
    // source and dest.
    if (*U == II.getRawDest() && *U == II.getRawSource()) {
      // For non-volatile transfers this is a no-op.
      if (!II.isVolatile())
        return markAsDead(II);

      return insertUse(II, Offset, Size, /*IsSplittable=*/false);
    }

    // If we have seen both source and destination for a mem transfer, then
    // they both point to the same alloca.
    bool Inserted;
    SmallDenseMap<Instruction *, unsigned>::iterator MTPI;
    std::tie(MTPI, Inserted) =
        MemTransferSliceMap.insert(std::make_pair(&II, AS.Slices.size()));
    unsigned PrevIdx = MTPI->second;
    if (!Inserted) {
      Slice &PrevP = AS.Slices[PrevIdx];

      // Check if the begin offsets match and this is a non-volatile transfer.
      // In that case, we can completely elide the transfer.
      if (!II.isVolatile() && PrevP.beginOffset() == RawOffset) {
        PrevP.kill();
        return markAsDead(II);
      }

      // Otherwise we have an offset transfer within the same alloca. We can't
      // split those.
      PrevP.makeUnsplittable();
    }

    // Insert the use now that we've fixed up the splittable nature.
    insertUse(II, Offset, Size, /*IsSplittable=*/Inserted && Length);

    // Check that we ended up with a valid index in the map.
    assert(AS.Slices[PrevIdx].getUse()->getUser() == &II &&
           "Map index doesn't point back to a slice with this user.");
  }
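
  // For instance, a memcpy whose source and destination both lie in this
  // alloca is visited once per pointer operand: the first visit records a
  // slice (splittable when the length is constant) and its index in
  // MemTransferSliceMap, and the second visit either elides the transfer
  // entirely (equal begin offsets, non-volatile) or marks both slices
  // unsplittable.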

  // Disable SRoA for any intrinsics except for lifetime invariants.
  // FIXME: What about debug intrinsics? This matches old behavior, but
  // doesn't make sense.
  void visitIntrinsicInst(IntrinsicInst &II) {
    if (!IsOffsetKnown)
      return PI.setAborted(&II);

    if (II.getIntrinsicID() == Intrinsic::lifetime_start ||
        II.getIntrinsicID() == Intrinsic::lifetime_end) {
      ConstantInt *Length = cast<ConstantInt>(II.getArgOperand(0));
      uint64_t Size = std::min(AllocSize - Offset.getLimitedValue(),
                               Length->getLimitedValue());
      insertUse(II, Offset, Size, true);
      return;
    }

    Base::visitIntrinsicInst(II);
  }

  Instruction *hasUnsafePHIOrSelectUse(Instruction *Root, uint64_t &Size) {
    // We consider any PHI or select that results in a direct load or store of
    // the same offset to be a viable use for slicing purposes. These uses
    // are considered unsplittable and the size is the maximum loaded or stored
    // size.
    SmallPtrSet<Instruction *, 4> Visited;
    SmallVector<std::pair<Instruction *, Instruction *>, 4> Uses;
    Visited.insert(Root);
    Uses.push_back(std::make_pair(cast<Instruction>(*U), Root));
    const DataLayout &DL = Root->getModule()->getDataLayout();
    // If there are no loads or stores, the access is dead. We mark that as
    // a size zero access.
    Size = 0;
    do {
      Instruction *I, *UsedI;
      std::tie(UsedI, I) = Uses.pop_back_val();

      if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
        Size = std::max(Size, DL.getTypeStoreSize(LI->getType()));
        continue;
      }
      if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
        Value *Op = SI->getOperand(0);
        if (Op == UsedI)
          return SI;
        Size = std::max(Size, DL.getTypeStoreSize(Op->getType()));
        continue;
      }

      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
        if (!GEP->hasAllZeroIndices())
          return GEP;
      } else if (!isa<BitCastInst>(I) && !isa<PHINode>(I) &&
                 !isa<SelectInst>(I)) {
        return I;
      }

      for (User *U : I->users())
        if (Visited.insert(cast<Instruction>(U)).second)
          Uses.push_back(std::make_pair(I, cast<Instruction>(U)));
    } while (!Uses.empty());

    return nullptr;
  }
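
  // For example, if a PHI of alloca pointers is used only by "%v = load i32*
  // %phi", this returns null and sets Size to 4; a use like "store i32* %phi,
  // i32** %q", where the PHI value itself is the operand being stored, is
  // returned as the unsafe instruction.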

  void visitPHINodeOrSelectInst(Instruction &I) {
    assert(isa<PHINode>(I) || isa<SelectInst>(I));
    if (I.use_empty())
      return markAsDead(I);

    // TODO: We could use SimplifyInstruction here to fold PHINodes and
    // SelectInsts. However, doing so requires changing the current
    // dead-operand-tracking mechanism. For instance, suppose neither loading
    // from %U nor %other traps. Then "load (select undef, %U, %other)" does not
    // trap either. However, if we simply replace %U with undef using the
    // current dead-operand-tracking mechanism, "load (select undef, undef,
    // %other)" may trap because the select may return the first operand
    // "undef".
    if (Value *Result = foldPHINodeOrSelectInst(I)) {
      if (Result == *U)
        // If the result of the constant fold will be the pointer, recurse
        // through the PHI/select as if we had RAUW'ed it.
        enqueueUsers(I);
      else
        // Otherwise the operand to the PHI/select is dead, and we can replace
        // it with undef.
        AS.DeadOperands.push_back(U);

      return;
    }

    if (!IsOffsetKnown)
      return PI.setAborted(&I);

    // See if we already have computed info on this node.
    uint64_t &Size = PHIOrSelectSizes[&I];
    if (!Size) {
      // This is a new PHI/Select, check for an unsafe use of it.
      if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&I, Size))
        return PI.setAborted(UnsafeI);
    }

    // For PHI and select operands outside the alloca, we can't nuke the entire
    // phi or select -- the other side might still be relevant, so we special
    // case them here and use a separate structure to track the operands
    // themselves which should be replaced with undef.
    // FIXME: This should instead be escaped in the event we're instrumenting
    // for address sanitization.
    if (Offset.uge(AllocSize)) {
      AS.DeadOperands.push_back(U);
      return;
    }

    insertUse(I, Offset, Size);
  }

  void visitPHINode(PHINode &PN) { visitPHINodeOrSelectInst(PN); }

  void visitSelectInst(SelectInst &SI) { visitPHINodeOrSelectInst(SI); }

  /// Disable SROA entirely if there are unhandled users of the alloca.
  void visitInstruction(Instruction &I) { PI.setAborted(&I); }
};

AllocaSlices::AllocaSlices(const DataLayout &DL, AllocaInst &AI)
    :
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
      AI(AI),
#endif
      PointerEscapingInstr(nullptr) {
  SliceBuilder PB(DL, AI, *this);
  SliceBuilder::PtrInfo PtrI = PB.visitPtr(AI);
  if (PtrI.isEscaped() || PtrI.isAborted()) {
    // FIXME: We should sink the escape vs. abort info into the caller nicely,
    // possibly by just storing the PtrInfo in the AllocaSlices.
    PointerEscapingInstr = PtrI.getEscapingInst() ? PtrI.getEscapingInst()
                                                  : PtrI.getAbortingInst();
    assert(PointerEscapingInstr && "Did not track a bad instruction");
    return;
  }

  Slices.erase(
      llvm::remove_if(Slices, [](const Slice &S) { return S.isDead(); }),
      Slices.end());

#ifndef NDEBUG
  if (SROARandomShuffleSlices) {
    std::mt19937 MT(static_cast<unsigned>(
        std::chrono::system_clock::now().time_since_epoch().count()));
    std::shuffle(Slices.begin(), Slices.end(), MT);
  }
#endif

  // Sort the uses. This arranges for the offsets to be in ascending order,
  // and the sizes to be in descending order.
  llvm::sort(Slices);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

void AllocaSlices::print(raw_ostream &OS, const_iterator I,
                         StringRef Indent) const {
  printSlice(OS, I, Indent);
  OS << "\n";
  printUse(OS, I, Indent);
}

void AllocaSlices::printSlice(raw_ostream &OS, const_iterator I,
                              StringRef Indent) const {
  OS << Indent << "[" << I->beginOffset() << "," << I->endOffset() << ")"
     << " slice #" << (I - begin())
     << (I->isSplittable() ? " (splittable)" : "");
}

void AllocaSlices::printUse(raw_ostream &OS, const_iterator I,
                            StringRef Indent) const {
  OS << Indent << "  used by: " << *I->getUse()->getUser() << "\n";
}

void AllocaSlices::print(raw_ostream &OS) const {
  if (PointerEscapingInstr) {
    OS << "Can't analyze slices for alloca: " << AI << "\n"
       << "  A pointer to this alloca escaped by:\n"
       << "  " << *PointerEscapingInstr << "\n";
    return;
  }

  OS << "Slices of alloca: " << AI << "\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I)
    print(OS, I);
}

LLVM_DUMP_METHOD void AllocaSlices::dump(const_iterator I) const {
  print(dbgs(), I);
}
LLVM_DUMP_METHOD void AllocaSlices::dump() const { print(dbgs()); }

#endif // !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

/// Walk the range of a partitioning looking for a common type to cover this
/// sequence of slices.
static Type *findCommonType(AllocaSlices::const_iterator B,
                            AllocaSlices::const_iterator E,
                            uint64_t EndOffset) {
  Type *Ty = nullptr;
  bool TyIsCommon = true;
  IntegerType *ITy = nullptr;

  // Note that we need to look at *every* alloca slice's Use to ensure we
  // always get consistent results regardless of the order of slices.
  for (AllocaSlices::const_iterator I = B; I != E; ++I) {
    Use *U = I->getUse();
    if (isa<IntrinsicInst>(*U->getUser()))
      continue;
    if (I->beginOffset() != B->beginOffset() || I->endOffset() != EndOffset)
      continue;

    Type *UserTy = nullptr;
    if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
      UserTy = LI->getType();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
      UserTy = SI->getValueOperand()->getType();
    }

    if (IntegerType *UserITy = dyn_cast_or_null<IntegerType>(UserTy)) {
      // If the type is larger than the partition, skip it. We only encounter
      // this for split integer operations where we want to use the type of the
      // entity causing the split. Also skip if the type is not a byte width
      // multiple.
      if (UserITy->getBitWidth() % 8 != 0 ||
          UserITy->getBitWidth() / 8 > (EndOffset - B->beginOffset()))
        continue;

      // Track the largest bitwidth integer type used in this way in case there
      // is no common type.
      if (!ITy || ITy->getBitWidth() < UserITy->getBitWidth())
        ITy = UserITy;
    }

    // To avoid depending on the order of slices, Ty and TyIsCommon must not
    // depend on types skipped above.
    if (!UserTy || (Ty && Ty != UserTy))
      TyIsCommon = false; // Give up on anything but an iN type.
    else
      Ty = UserTy;
  }

  return TyIsCommon ? Ty : ITy;
}
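
// For example, if one slice loads the partition as a float while another
// stores it as an i32, there is no common type and the i32 is returned as the
// integer fallback; if every access covering the partition used float, float
// itself would be returned.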

/// PHI instructions that use an alloca and are subsequently loaded can be
/// rewritten to load both input pointers in the pred blocks and then PHI the
/// results, allowing the load of the alloca to be promoted.
/// From this:
///   %P2 = phi [i32* %Alloca, i32* %Other]
///   %V = load i32* %P2
/// to:
///   %V1 = load i32* %Alloca      -> will be mem2reg'd
///   ...
///   %V2 = load i32* %Other
///   ...
///   %V = phi [i32 %V1, i32 %V2]
///
/// We can do this to a select if its only uses are loads and if the operands
/// to the select can be loaded unconditionally.
///
/// FIXME: This should be hoisted into a generic utility, likely in
/// Transforms/Util/Local.h
static bool isSafePHIToSpeculate(PHINode &PN) {
  // For now, we can only do this promotion if the load is in the same block
  // as the PHI, and if there are no stores between the phi and load.
  // TODO: Allow recursive phi users.
  // TODO: Allow stores.
  BasicBlock *BB = PN.getParent();
  unsigned MaxAlign = 0;
  bool HaveLoad = false;
  for (User *U : PN.users()) {
    LoadInst *LI = dyn_cast<LoadInst>(U);
    if (!LI || !LI->isSimple())
      return false;

    // For now we only allow loads in the same block as the PHI. This is
    // a common case that happens when instcombine merges two loads through
    // a PHI.
    if (LI->getParent() != BB)
      return false;

    // Ensure that there are no instructions between the PHI and the load that
    // could store.
    for (BasicBlock::iterator BBI(PN); &*BBI != LI; ++BBI)
      if (BBI->mayWriteToMemory())
        return false;

    MaxAlign = std::max(MaxAlign, LI->getAlignment());
    HaveLoad = true;
  }

  if (!HaveLoad)
    return false;

  const DataLayout &DL = PN.getModule()->getDataLayout();

  // We can only transform this if it is safe to push the loads into the
  // predecessor blocks. The only thing to watch out for is that we can't put
  // a possibly trapping load in the predecessor if it is a critical edge.
  for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
    Instruction *TI = PN.getIncomingBlock(Idx)->getTerminator();
    Value *InVal = PN.getIncomingValue(Idx);

    // If the value is produced by the terminator of the predecessor (an
    // invoke) or it has side-effects, there is no valid place to put a load
    // in the predecessor.
    if (TI == InVal || TI->mayHaveSideEffects())
      return false;

    // If the predecessor has a single successor, then the edge isn't
    // critical.
    if (TI->getNumSuccessors() == 1)
      continue;

    // If this pointer is always safe to load, or if we can prove that there
    // is already a load in the block, then we can move the load to the pred
    // block.
    if (isSafeToLoadUnconditionally(InVal, MaxAlign, DL, TI))
      continue;

    return false;
  }

  return true;
}

static void speculatePHINodeLoads(PHINode &PN) {
  LLVM_DEBUG(dbgs() << "    original: " << PN << "\n");

  Type *LoadTy = cast<PointerType>(PN.getType())->getElementType();
  IRBuilderTy PHIBuilder(&PN);
  PHINode *NewPN = PHIBuilder.CreatePHI(LoadTy, PN.getNumIncomingValues(),
                                        PN.getName() + ".sroa.speculated");

  // Get the AA tags and alignment to use from one of the loads. It doesn't
  // matter which one we get and if any differ.
  LoadInst *SomeLoad = cast<LoadInst>(PN.user_back());

  AAMDNodes AATags;
  SomeLoad->getAAMetadata(AATags);
  unsigned Align = SomeLoad->getAlignment();

  // Rewrite all loads of the PN to use the new PHI.
  while (!PN.use_empty()) {
    LoadInst *LI = cast<LoadInst>(PN.user_back());
    LI->replaceAllUsesWith(NewPN);
    LI->eraseFromParent();
  }

  // Inject loads into all of the pred blocks.
  DenseMap<BasicBlock*, Value*> InjectedLoads;
  for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
    BasicBlock *Pred = PN.getIncomingBlock(Idx);
    Value *InVal = PN.getIncomingValue(Idx);

    // A PHI node is allowed to have multiple (duplicated) entries for the same
    // basic block, as long as the value is the same. So if we already injected
    // a load in the predecessor, then we should reuse the same load for all
    // duplicated entries.
    if (Value* V = InjectedLoads.lookup(Pred)) {
      NewPN->addIncoming(V, Pred);
      continue;
    }

    Instruction *TI = Pred->getTerminator();
    IRBuilderTy PredBuilder(TI);

    LoadInst *Load = PredBuilder.CreateLoad(
        InVal, (PN.getName() + ".sroa.speculate.load." + Pred->getName()));
    ++NumLoadsSpeculated;
    Load->setAlignment(Align);
    if (AATags)
      Load->setAAMetadata(AATags);
    NewPN->addIncoming(Load, Pred);
    InjectedLoads[Pred] = Load;
  }

  LLVM_DEBUG(dbgs() << "          speculated to: " << *NewPN << "\n");
  PN.eraseFromParent();
}

/// Select instructions that use an alloca and are subsequently loaded can be
/// rewritten to load both input pointers and then select between the result,
/// allowing the load of the alloca to be promoted.
/// From this:
///   %P2 = select i1 %cond, i32* %Alloca, i32* %Other
///   %V = load i32* %P2
/// to:
///   %V1 = load i32* %Alloca      -> will be mem2reg'd
///   %V2 = load i32* %Other
///   %V = select i1 %cond, i32 %V1, i32 %V2
///
/// We can do this to a select if its only uses are loads and if the operand
/// to the select can be loaded unconditionally.
static bool isSafeSelectToSpeculate(SelectInst &SI) {
  Value *TValue = SI.getTrueValue();
  Value *FValue = SI.getFalseValue();
  const DataLayout &DL = SI.getModule()->getDataLayout();

  for (User *U : SI.users()) {
    LoadInst *LI = dyn_cast<LoadInst>(U);
    if (!LI || !LI->isSimple())
      return false;

    // Both operands to the select need to be dereferenceable, either
    // absolutely (e.g. allocas) or at this point because we can see other
    // accesses to it.
    if (!isSafeToLoadUnconditionally(TValue, LI->getAlignment(), DL, LI))
      return false;
    if (!isSafeToLoadUnconditionally(FValue, LI->getAlignment(), DL, LI))
      return false;
  }

  return true;
}

static void speculateSelectInstLoads(SelectInst &SI) {
  LLVM_DEBUG(dbgs() << "    original: " << SI << "\n");

  IRBuilderTy IRB(&SI);
  Value *TV = SI.getTrueValue();
  Value *FV = SI.getFalseValue();
  // Replace the loads of the select with a select of two loads.
  while (!SI.use_empty()) {
    LoadInst *LI = cast<LoadInst>(SI.user_back());
    assert(LI->isSimple() && "We only speculate simple loads");

    IRB.SetInsertPoint(LI);
    LoadInst *TL =
        IRB.CreateLoad(TV, LI->getName() + ".sroa.speculate.load.true");
    LoadInst *FL =
        IRB.CreateLoad(FV, LI->getName() + ".sroa.speculate.load.false");
    NumLoadsSpeculated += 2;

    // Transfer alignment and AA info if present.
    TL->setAlignment(LI->getAlignment());
    FL->setAlignment(LI->getAlignment());

    AAMDNodes Tags;
    LI->getAAMetadata(Tags);
    if (Tags) {
      TL->setAAMetadata(Tags);
      FL->setAAMetadata(Tags);
    }

    Value *V = IRB.CreateSelect(SI.getCondition(), TL, FL,
                                LI->getName() + ".sroa.speculated");

    LLVM_DEBUG(dbgs() << "          speculated to: " << *V << "\n");
    LI->replaceAllUsesWith(V);
    LI->eraseFromParent();
  }
  SI.eraseFromParent();
}

/// Build a GEP out of a base pointer and indices.
///
/// This will return the BasePtr if that is valid, or build a new GEP
/// instruction using the IRBuilder if GEP-ing is needed.
static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr,
                       SmallVectorImpl<Value *> &Indices, Twine NamePrefix) {
  if (Indices.empty())
    return BasePtr;

  // A single zero index is a no-op, so check for this and avoid building a GEP
  // in that case.
  if (Indices.size() == 1 && cast<ConstantInt>(Indices.back())->isZero())
    return BasePtr;

  return IRB.CreateInBoundsGEP(nullptr, BasePtr, Indices,
                               NamePrefix + "sroa_idx");
}
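
// So an empty index list or the single index {0} simply returns BasePtr,
// while indices such as {0, 3} become an inbounds GEP whose name carries the
// "sroa_idx" suffix.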

/// Get a natural GEP off of the BasePtr walking through Ty toward
/// TargetTy without changing the offset of the pointer.
///
/// This routine assumes we've already established a properly offset GEP with
/// Indices, and arrived at the Ty type. The goal is to continue to GEP with
/// zero-indices down through type layers until we find one the same as
/// TargetTy. If we can't find one with the same type, we at least try to use
/// one with the same size. If none of that works, we just produce the GEP as
/// indicated by Indices to have the correct offset.
static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL,
                                    Value *BasePtr, Type *Ty, Type *TargetTy,
                                    SmallVectorImpl<Value *> &Indices,
                                    Twine NamePrefix) {
  if (Ty == TargetTy)
    return buildGEP(IRB, BasePtr, Indices, NamePrefix);

  // Offset size to use for the indices.
  unsigned OffsetSize = DL.getIndexTypeSizeInBits(BasePtr->getType());

  // See if we can descend into a struct and locate a field with the correct
  // type.
  unsigned NumLayers = 0;
  Type *ElementTy = Ty;
  do {
    if (ElementTy->isPointerTy())
      break;

    if (ArrayType *ArrayTy = dyn_cast<ArrayType>(ElementTy)) {
      ElementTy = ArrayTy->getElementType();
      Indices.push_back(IRB.getIntN(OffsetSize, 0));
    } else if (VectorType *VectorTy = dyn_cast<VectorType>(ElementTy)) {
      ElementTy = VectorTy->getElementType();
      Indices.push_back(IRB.getInt32(0));
    } else if (StructType *STy = dyn_cast<StructType>(ElementTy)) {
      if (STy->element_begin() == STy->element_end())
        break; // Nothing left to descend into.
      ElementTy = *STy->element_begin();
      Indices.push_back(IRB.getInt32(0));
    } else {
      break;
    }
    ++NumLayers;
  } while (ElementTy != TargetTy);
  if (ElementTy != TargetTy)
    Indices.erase(Indices.end() - NumLayers, Indices.end());

  return buildGEP(IRB, BasePtr, Indices, NamePrefix);
}

/// Recursively compute indices for a natural GEP.
///
/// This is the recursive step for getNaturalGEPWithOffset that walks down the
/// element types adding appropriate indices for the GEP.
static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &DL,
                                       Value *Ptr, Type *Ty, APInt &Offset,
                                       Type *TargetTy,
                                       SmallVectorImpl<Value *> &Indices,
                                       Twine NamePrefix) {
  if (Offset == 0)
    return getNaturalGEPWithType(IRB, DL, Ptr, Ty, TargetTy, Indices,
                                 NamePrefix);

  // We can't recurse through pointer types.
  if (Ty->isPointerTy())
    return nullptr;

  // We try to analyze GEPs over vectors here, but note that these GEPs are
  // extremely poorly defined currently. The long-term goal is to remove GEPing
  // over a vector from the IR completely.
  if (VectorType *VecTy = dyn_cast<VectorType>(Ty)) {
    unsigned ElementSizeInBits = DL.getTypeSizeInBits(VecTy->getScalarType());
    if (ElementSizeInBits % 8 != 0) {
      // GEPs over non-multiple of 8 size vector elements are invalid.
      return nullptr;
    }
    APInt ElementSize(Offset.getBitWidth(), ElementSizeInBits / 8);
    APInt NumSkippedElements = Offset.sdiv(ElementSize);
    if (NumSkippedElements.ugt(VecTy->getNumElements()))
      return nullptr;
    Offset -= NumSkippedElements * ElementSize;
    Indices.push_back(IRB.getInt(NumSkippedElements));
    return getNaturalGEPRecursively(IRB, DL, Ptr, VecTy->getElementType(),
                                    Offset, TargetTy, Indices, NamePrefix);
  }

  if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) {
    Type *ElementTy = ArrTy->getElementType();
    APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy));
    APInt NumSkippedElements = Offset.sdiv(ElementSize);
    if (NumSkippedElements.ugt(ArrTy->getNumElements()))
      return nullptr;

    Offset -= NumSkippedElements * ElementSize;
    Indices.push_back(IRB.getInt(NumSkippedElements));
    return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
                                    Indices, NamePrefix);
  }

  StructType *STy = dyn_cast<StructType>(Ty);
  if (!STy)
    return nullptr;

  const StructLayout *SL = DL.getStructLayout(STy);
  uint64_t StructOffset = Offset.getZExtValue();
  if (StructOffset >= SL->getSizeInBytes())
    return nullptr;
  unsigned Index = SL->getElementContainingOffset(StructOffset);
  Offset -= APInt(Offset.getBitWidth(), SL->getElementOffset(Index));
  Type *ElementTy = STy->getElementType(Index);
  if (Offset.uge(DL.getTypeAllocSize(ElementTy)))
    return nullptr; // The offset points into alignment padding.

  Indices.push_back(IRB.getInt32(Index));
  return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
                                  Indices, NamePrefix);
}

/// Get a natural GEP from a base pointer to a particular offset and
/// resulting in a particular type.
///
/// The goal is to produce a "natural" looking GEP that works with the existing
/// composite types to arrive at the appropriate offset and element type for
/// a pointer. TargetTy is the element type the returned GEP should point-to if
/// possible. We recurse by decreasing Offset, adding the appropriate index to
/// Indices, and setting Ty to the result subtype.
///
/// If no natural GEP can be constructed, this function returns null.
static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &DL,
                                      Value *Ptr, APInt Offset, Type *TargetTy,
                                      SmallVectorImpl<Value *> &Indices,
                                      Twine NamePrefix) {
  PointerType *Ty = cast<PointerType>(Ptr->getType());

  // Don't consider any GEPs through an i8* as natural unless the TargetTy is
  // an i8.
  if (Ty == IRB.getInt8PtrTy(Ty->getAddressSpace()) && TargetTy->isIntegerTy(8))
    return nullptr;

  Type *ElementTy = Ty->getElementType();
  if (!ElementTy->isSized())
    return nullptr; // We can't GEP through an unsized element.
  APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy));
  if (ElementSize == 0)
    return nullptr; // Zero-length arrays can't help us build a natural GEP.
  APInt NumSkippedElements = Offset.sdiv(ElementSize);

  Offset -= NumSkippedElements * ElementSize;
  Indices.push_back(IRB.getInt(NumSkippedElements));
  return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
                                  Indices, NamePrefix);
}
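
// For example, walking a [4 x i32]* toward an i32 at byte offset 12 first
// skips zero whole 16-byte aggregates, then the array step divides 12 by the
// 4-byte element size to select index 3, leaving Offset == 0 and yielding the
// natural GEP indices {0, 3}.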
1541 ///
1542 /// This tries very hard to compute a "natural" GEP which arrives at the offset
1543 /// and produces the pointer type desired. Where it cannot, it will try to use
1544 /// the natural GEP to arrive at the offset and bitcast to the type. Where that
1545 /// fails, it will try to use an existing i8* and GEP to the byte offset and
1546 /// bitcast to the type.
1547 ///
1548 /// The strategy for finding the more natural GEPs is to peel off layers of the
1549 /// pointer, walking back through bit casts and GEPs, searching for a base
1550 /// pointer from which we can compute a natural GEP with the desired
1551 /// properties. The algorithm tries to fold as many constant indices into
1552 /// a single GEP as possible, thus making each GEP more independent of the
1553 /// surrounding code.
1554 static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr,
1555 APInt Offset, Type *PointerTy, Twine NamePrefix) {
1556 // Even though we don't look through PHI nodes, we could be called on an
1557 // instruction in an unreachable block, which may be on a cycle.
1558 SmallPtrSet<Value *, 4> Visited;
1559 Visited.insert(Ptr);
1560 SmallVector<Value *, 4> Indices;
1561 
1562 // We may end up computing an offset pointer that has the wrong type. If we
1563 // are never able to compute one directly that has the correct type, we'll
1564 // fall back to it, so keep it and the base it was computed from around here.
1565 Value *OffsetPtr = nullptr;
1566 Value *OffsetBasePtr;
1567 
1568 // Remember any i8 pointer we come across to re-use if we need to do a raw
1569 // byte offset.
1570 Value *Int8Ptr = nullptr;
1571 APInt Int8PtrOffset(Offset.getBitWidth(), 0);
1572 
1573 Type *TargetTy = PointerTy->getPointerElementType();
1574 
1575 do {
1576 // First fold any existing GEPs into the offset.
1577 while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
1578 APInt GEPOffset(Offset.getBitWidth(), 0);
1579 if (!GEP->accumulateConstantOffset(DL, GEPOffset))
1580 break;
1581 Offset += GEPOffset;
1582 Ptr = GEP->getPointerOperand();
1583 if (!Visited.insert(Ptr).second)
1584 break;
1585 }
1586 
1587 // See if we can perform a natural GEP here.
1588 Indices.clear();
1589 if (Value *P = getNaturalGEPWithOffset(IRB, DL, Ptr, Offset, TargetTy,
1590 Indices, NamePrefix)) {
1591 // If we have a new natural pointer at the offset, clear out any old
1592 // offset pointer we computed. Unless it is the base pointer or
1593 // a non-instruction, we built a GEP we don't need. Zap it.
1594 if (OffsetPtr && OffsetPtr != OffsetBasePtr)
1595 if (Instruction *I = dyn_cast<Instruction>(OffsetPtr)) {
1596 assert(I->use_empty() && "Built a GEP with uses somehow!");
1597 I->eraseFromParent();
1598 }
1599 OffsetPtr = P;
1600 OffsetBasePtr = Ptr;
1601 // If we also found a pointer of the right type, we're done.
1602 if (P->getType() == PointerTy)
1603 return P;
1604 }
1605 
1606 // Stash this pointer if we've found an i8*.
1607 if (Ptr->getType()->isIntegerTy(8)) {
1608 Int8Ptr = Ptr;
1609 Int8PtrOffset = Offset;
1610 }
1611 
1612 // Peel off a layer of the pointer and update the offset appropriately.
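// (For illustration: if Ptr is "bitcast %struct.S* %base to i8*", the peel
// below strips the bitcast so the next iteration can retry the natural-GEP
// search from %base; interposable global aliases stop the walk instead.)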
1613 if (Operator::getOpcode(Ptr) == Instruction::BitCast) {
1614 Ptr = cast<Operator>(Ptr)->getOperand(0);
1615 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
1616 if (GA->isInterposable())
1617 break;
1618 Ptr = GA->getAliasee();
1619 } else {
1620 break;
1621 }
1622 assert(Ptr->getType()->isPointerTy() && "Unexpected operand type!");
1623 } while (Visited.insert(Ptr).second);
1624 
1625 if (!OffsetPtr) {
1626 if (!Int8Ptr) {
1627 Int8Ptr = IRB.CreateBitCast(
1628 Ptr, IRB.getInt8PtrTy(PointerTy->getPointerAddressSpace()),
1629 NamePrefix + "sroa_raw_cast");
1630 Int8PtrOffset = Offset;
1631 }
1632 
1633 OffsetPtr = Int8PtrOffset == 0
1634 ? Int8Ptr
1635 : IRB.CreateInBoundsGEP(IRB.getInt8Ty(), Int8Ptr,
1636 IRB.getInt(Int8PtrOffset),
1637 NamePrefix + "sroa_raw_idx");
1638 }
1639 Ptr = OffsetPtr;
1640 
1641 // On the off chance we were targeting i8*, guard the bitcast here.
1642 if (Ptr->getType() != PointerTy)
1643 Ptr = IRB.CreateBitCast(Ptr, PointerTy, NamePrefix + "sroa_cast");
1644 
1645 return Ptr;
1646 }
1647 
1648 /// Compute the adjusted alignment for a load or store from an offset.
1649 static unsigned getAdjustedAlignment(Instruction *I, uint64_t Offset,
1650 const DataLayout &DL) {
1651 unsigned Alignment;
1652 Type *Ty;
1653 if (auto *LI = dyn_cast<LoadInst>(I)) {
1654 Alignment = LI->getAlignment();
1655 Ty = LI->getType();
1656 } else if (auto *SI = dyn_cast<StoreInst>(I)) {
1657 Alignment = SI->getAlignment();
1658 Ty = SI->getValueOperand()->getType();
1659 } else {
1660 llvm_unreachable("Only loads and stores are allowed!");
1661 }
1662 
1663 if (!Alignment)
1664 Alignment = DL.getABITypeAlignment(Ty);
1665 
1666 return MinAlign(Alignment, Offset);
1667 }
1668 
1669 /// Test whether we can convert a value from the old to the new type.
1670 ///
1671 /// This predicate should be used to guard calls to convertValue in order to
1672 /// ensure that we only try to convert viable values. The strategy is that we
1673 /// will peel off single element struct and array wrappings to get to an
1674 /// underlying value, and convert that value.
1675 static bool canConvertValue(const DataLayout &DL, Type *OldTy, Type *NewTy) {
1676 if (OldTy == NewTy)
1677 return true;
1678 
1679 // For integer types, we can't handle any bit-width differences. This would
1680 // break both vector conversions with extension and introduce endianness
1681 // issues when used in conjunction with loads and stores.
1682 if (isa<IntegerType>(OldTy) && isa<IntegerType>(NewTy)) {
1683 assert(cast<IntegerType>(OldTy)->getBitWidth() !=
1684 cast<IntegerType>(NewTy)->getBitWidth() &&
1685 "We can't have the same bitwidth for different int types");
1686 return false;
1687 }
1688 
1689 if (DL.getTypeSizeInBits(NewTy) != DL.getTypeSizeInBits(OldTy))
1690 return false;
1691 if (!NewTy->isSingleValueType() || !OldTy->isSingleValueType())
1692 return false;
1693 
1694 // We can convert pointers to integers and vice-versa. Same for vectors
1695 // of pointers and integers.
1696 OldTy = OldTy->getScalarType();
1697 NewTy = NewTy->getScalarType();
1698 if (NewTy->isPointerTy() || OldTy->isPointerTy()) {
1699 if (NewTy->isPointerTy() && OldTy->isPointerTy()) {
1700 return cast<PointerType>(NewTy)->getPointerAddressSpace() ==
1701 cast<PointerType>(OldTy)->getPointerAddressSpace();
1702 }
1703 
1704 // We can convert integers to integral pointers, but not to non-integral
1705 // pointers.
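// (Non-integral pointers are the ones named by the datalayout's "ni"
// component, e.g. "ni:1" marks address space 1; such pointers have no
// stable integer representation, so we conservatively refuse to convert.)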
1706 if (OldTy->isIntegerTy())
1707 return !DL.isNonIntegralPointerType(NewTy);
1708 
1709 // We can convert integral pointers to integers, but non-integral pointers
1710 // need to remain pointers.
1711 if (!DL.isNonIntegralPointerType(OldTy))
1712 return NewTy->isIntegerTy();
1713 
1714 return false;
1715 }
1716 
1717 return true;
1718 }
1719 
1720 /// Generic routine to convert an SSA value to a value of a different
1721 /// type.
1722 ///
1723 /// This will try various different casting techniques, such as bitcasts,
1724 /// inttoptr, and ptrtoint casts. Use the \c canConvertValue predicate to test
1725 /// two types for viability with this routine.
1726 static Value *convertValue(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
1727 Type *NewTy) {
1728 Type *OldTy = V->getType();
1729 assert(canConvertValue(DL, OldTy, NewTy) && "Value not convertible to type");
1730 
1731 if (OldTy == NewTy)
1732 return V;
1733 
1734 assert(!(isa<IntegerType>(OldTy) && isa<IntegerType>(NewTy)) &&
1735 "Integer types must be the exact same to convert.");
1736 
1737 // See if we need inttoptr for this type pair. A cast involving both scalars
1738 // and vectors requires an additional bitcast.
1739 if (OldTy->isIntOrIntVectorTy() && NewTy->isPtrOrPtrVectorTy()) {
1740 // Expand <2 x i32> to i8* --> <2 x i32> to i64 to i8*
1741 if (OldTy->isVectorTy() && !NewTy->isVectorTy())
1742 return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)),
1743 NewTy);
1744 
1745 // Expand i128 to <2 x i8*> --> i128 to <2 x i64> to <2 x i8*>
1746 if (!OldTy->isVectorTy() && NewTy->isVectorTy())
1747 return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)),
1748 NewTy);
1749 
1750 return IRB.CreateIntToPtr(V, NewTy);
1751 }
1752 
1753 // See if we need ptrtoint for this type pair. A cast involving both scalars
1754 // and vectors requires an additional bitcast.
1755 if (OldTy->isPtrOrPtrVectorTy() && NewTy->isIntOrIntVectorTy()) {
1756 // Expand <2 x i8*> to i128 --> <2 x i8*> to <2 x i64> to i128
1757 if (OldTy->isVectorTy() && !NewTy->isVectorTy())
1758 return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)),
1759 NewTy);
1760 
1761 // Expand i8* to <2 x i32> --> i8* to i64 to <2 x i32>
1762 if (!OldTy->isVectorTy() && NewTy->isVectorTy())
1763 return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)),
1764 NewTy);
1765 
1766 return IRB.CreatePtrToInt(V, NewTy);
1767 }
1768 
1769 return IRB.CreateBitCast(V, NewTy);
1770 }
1771 
1772 /// Test whether the given slice use can be promoted to a vector.
1773 ///
1774 /// This function is called to test each entry in a partition which is slated
1775 /// for a single slice.
1776 static bool isVectorPromotionViableForSlice(Partition &P, const Slice &S,
1777 VectorType *Ty,
1778 uint64_t ElementSize,
1779 const DataLayout &DL) {
1780 // First validate the slice offsets.
1781 uint64_t BeginOffset =
1782 std::max(S.beginOffset(), P.beginOffset()) - P.beginOffset();
1783 uint64_t BeginIndex = BeginOffset / ElementSize;
1784 if (BeginIndex * ElementSize != BeginOffset ||
1785 BeginIndex >= Ty->getNumElements())
1786 return false;
1787 uint64_t EndOffset =
1788 std::min(S.endOffset(), P.endOffset()) - P.beginOffset();
1789 uint64_t EndIndex = EndOffset / ElementSize;
1790 if (EndIndex * ElementSize != EndOffset || EndIndex > Ty->getNumElements())
1791 return false;
1792 
1793 assert(EndIndex > BeginIndex && "Empty vector!");
1794 uint64_t NumElements = EndIndex - BeginIndex;
1795 Type *SliceTy = (NumElements == 1)
1796 ?
Ty->getElementType() 1797 : VectorType::get(Ty->getElementType(), NumElements); 1798 1799 Type *SplitIntTy = 1800 Type::getIntNTy(Ty->getContext(), NumElements * ElementSize * 8); 1801 1802 Use *U = S.getUse(); 1803 1804 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) { 1805 if (MI->isVolatile()) 1806 return false; 1807 if (!S.isSplittable()) 1808 return false; // Skip any unsplittable intrinsics. 1809 } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) { 1810 if (II->getIntrinsicID() != Intrinsic::lifetime_start && 1811 II->getIntrinsicID() != Intrinsic::lifetime_end) 1812 return false; 1813 } else if (U->get()->getType()->getPointerElementType()->isStructTy()) { 1814 // Disable vector promotion when there are loads or stores of an FCA. 1815 return false; 1816 } else if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) { 1817 if (LI->isVolatile()) 1818 return false; 1819 Type *LTy = LI->getType(); 1820 if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) { 1821 assert(LTy->isIntegerTy()); 1822 LTy = SplitIntTy; 1823 } 1824 if (!canConvertValue(DL, SliceTy, LTy)) 1825 return false; 1826 } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) { 1827 if (SI->isVolatile()) 1828 return false; 1829 Type *STy = SI->getValueOperand()->getType(); 1830 if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) { 1831 assert(STy->isIntegerTy()); 1832 STy = SplitIntTy; 1833 } 1834 if (!canConvertValue(DL, STy, SliceTy)) 1835 return false; 1836 } else { 1837 return false; 1838 } 1839 1840 return true; 1841 } 1842 1843 /// Test whether the given alloca partitioning and range of slices can be 1844 /// promoted to a vector. 1845 /// 1846 /// This is a quick test to check whether we can rewrite a particular alloca 1847 /// partition (and its newly formed alloca) into a vector alloca with only 1848 /// whole-vector loads and stores such that it could be promoted to a vector 1849 /// SSA value. We only can ensure this for a limited set of operations, and we 1850 /// don't want to do the rewrites unless we are confident that the result will 1851 /// be promotable, so we have an early test here. 1852 static VectorType *isVectorPromotionViable(Partition &P, const DataLayout &DL) { 1853 // Collect the candidate types for vector-based promotion. Also track whether 1854 // we have different element types. 1855 SmallVector<VectorType *, 4> CandidateTys; 1856 Type *CommonEltTy = nullptr; 1857 bool HaveCommonEltTy = true; 1858 auto CheckCandidateType = [&](Type *Ty) { 1859 if (auto *VTy = dyn_cast<VectorType>(Ty)) { 1860 CandidateTys.push_back(VTy); 1861 if (!CommonEltTy) 1862 CommonEltTy = VTy->getElementType(); 1863 else if (CommonEltTy != VTy->getElementType()) 1864 HaveCommonEltTy = false; 1865 } 1866 }; 1867 // Consider any loads or stores that are the exact size of the slice. 1868 for (const Slice &S : P) 1869 if (S.beginOffset() == P.beginOffset() && 1870 S.endOffset() == P.endOffset()) { 1871 if (auto *LI = dyn_cast<LoadInst>(S.getUse()->getUser())) 1872 CheckCandidateType(LI->getType()); 1873 else if (auto *SI = dyn_cast<StoreInst>(S.getUse()->getUser())) 1874 CheckCandidateType(SI->getValueOperand()->getType()); 1875 } 1876 1877 // If we didn't find a vector type, nothing to do here. 1878 if (CandidateTys.empty()) 1879 return nullptr; 1880 1881 // Remove non-integer vector types if we had multiple common element types. 
1882 // FIXME: It'd be nice to replace them with integer vector types, but we can't 1883 // do that until all the backends are known to produce good code for all 1884 // integer vector types. 1885 if (!HaveCommonEltTy) { 1886 CandidateTys.erase( 1887 llvm::remove_if(CandidateTys, 1888 [](VectorType *VTy) { 1889 return !VTy->getElementType()->isIntegerTy(); 1890 }), 1891 CandidateTys.end()); 1892 1893 // If there were no integer vector types, give up. 1894 if (CandidateTys.empty()) 1895 return nullptr; 1896 1897 // Rank the remaining candidate vector types. This is easy because we know 1898 // they're all integer vectors. We sort by ascending number of elements. 1899 auto RankVectorTypes = [&DL](VectorType *RHSTy, VectorType *LHSTy) { 1900 (void)DL; 1901 assert(DL.getTypeSizeInBits(RHSTy) == DL.getTypeSizeInBits(LHSTy) && 1902 "Cannot have vector types of different sizes!"); 1903 assert(RHSTy->getElementType()->isIntegerTy() && 1904 "All non-integer types eliminated!"); 1905 assert(LHSTy->getElementType()->isIntegerTy() && 1906 "All non-integer types eliminated!"); 1907 return RHSTy->getNumElements() < LHSTy->getNumElements(); 1908 }; 1909 llvm::sort(CandidateTys, RankVectorTypes); 1910 CandidateTys.erase( 1911 std::unique(CandidateTys.begin(), CandidateTys.end(), RankVectorTypes), 1912 CandidateTys.end()); 1913 } else { 1914 // The only way to have the same element type in every vector type is to 1915 // have the same vector type. Check that and remove all but one. 1916 #ifndef NDEBUG 1917 for (VectorType *VTy : CandidateTys) { 1918 assert(VTy->getElementType() == CommonEltTy && 1919 "Unaccounted for element type!"); 1920 assert(VTy == CandidateTys[0] && 1921 "Different vector types with the same element type!"); 1922 } 1923 #endif 1924 CandidateTys.resize(1); 1925 } 1926 1927 // Try each vector type, and return the one which works. 1928 auto CheckVectorTypeForPromotion = [&](VectorType *VTy) { 1929 uint64_t ElementSize = DL.getTypeSizeInBits(VTy->getElementType()); 1930 1931 // While the definition of LLVM vectors is bitpacked, we don't support sizes 1932 // that aren't byte sized. 1933 if (ElementSize % 8) 1934 return false; 1935 assert((DL.getTypeSizeInBits(VTy) % 8) == 0 && 1936 "vector size not a multiple of element size?"); 1937 ElementSize /= 8; 1938 1939 for (const Slice &S : P) 1940 if (!isVectorPromotionViableForSlice(P, S, VTy, ElementSize, DL)) 1941 return false; 1942 1943 for (const Slice *S : P.splitSliceTails()) 1944 if (!isVectorPromotionViableForSlice(P, *S, VTy, ElementSize, DL)) 1945 return false; 1946 1947 return true; 1948 }; 1949 for (VectorType *VTy : CandidateTys) 1950 if (CheckVectorTypeForPromotion(VTy)) 1951 return VTy; 1952 1953 return nullptr; 1954 } 1955 1956 /// Test whether a slice of an alloca is valid for integer widening. 1957 /// 1958 /// This implements the necessary checking for the \c isIntegerWideningViable 1959 /// test below on a single slice of the alloca. 1960 static bool isIntegerWideningViableForSlice(const Slice &S, 1961 uint64_t AllocBeginOffset, 1962 Type *AllocaTy, 1963 const DataLayout &DL, 1964 bool &WholeAllocaOp) { 1965 uint64_t Size = DL.getTypeStoreSize(AllocaTy); 1966 1967 uint64_t RelBegin = S.beginOffset() - AllocBeginOffset; 1968 uint64_t RelEnd = S.endOffset() - AllocBeginOffset; 1969 1970 // We can't reasonably handle cases where the load or store extends past 1971 // the end of the alloca's type and into its padding. 
1972 if (RelEnd > Size) 1973 return false; 1974 1975 Use *U = S.getUse(); 1976 1977 if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) { 1978 if (LI->isVolatile()) 1979 return false; 1980 // We can't handle loads that extend past the allocated memory. 1981 if (DL.getTypeStoreSize(LI->getType()) > Size) 1982 return false; 1983 // So far, AllocaSliceRewriter does not support widening split slice tails 1984 // in rewriteIntegerLoad. 1985 if (S.beginOffset() < AllocBeginOffset) 1986 return false; 1987 // Note that we don't count vector loads or stores as whole-alloca 1988 // operations which enable integer widening because we would prefer to use 1989 // vector widening instead. 1990 if (!isa<VectorType>(LI->getType()) && RelBegin == 0 && RelEnd == Size) 1991 WholeAllocaOp = true; 1992 if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType())) { 1993 if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy)) 1994 return false; 1995 } else if (RelBegin != 0 || RelEnd != Size || 1996 !canConvertValue(DL, AllocaTy, LI->getType())) { 1997 // Non-integer loads need to be convertible from the alloca type so that 1998 // they are promotable. 1999 return false; 2000 } 2001 } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) { 2002 Type *ValueTy = SI->getValueOperand()->getType(); 2003 if (SI->isVolatile()) 2004 return false; 2005 // We can't handle stores that extend past the allocated memory. 2006 if (DL.getTypeStoreSize(ValueTy) > Size) 2007 return false; 2008 // So far, AllocaSliceRewriter does not support widening split slice tails 2009 // in rewriteIntegerStore. 2010 if (S.beginOffset() < AllocBeginOffset) 2011 return false; 2012 // Note that we don't count vector loads or stores as whole-alloca 2013 // operations which enable integer widening because we would prefer to use 2014 // vector widening instead. 2015 if (!isa<VectorType>(ValueTy) && RelBegin == 0 && RelEnd == Size) 2016 WholeAllocaOp = true; 2017 if (IntegerType *ITy = dyn_cast<IntegerType>(ValueTy)) { 2018 if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy)) 2019 return false; 2020 } else if (RelBegin != 0 || RelEnd != Size || 2021 !canConvertValue(DL, ValueTy, AllocaTy)) { 2022 // Non-integer stores need to be convertible to the alloca type so that 2023 // they are promotable. 2024 return false; 2025 } 2026 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) { 2027 if (MI->isVolatile() || !isa<Constant>(MI->getLength())) 2028 return false; 2029 if (!S.isSplittable()) 2030 return false; // Skip any unsplittable intrinsics. 2031 } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) { 2032 if (II->getIntrinsicID() != Intrinsic::lifetime_start && 2033 II->getIntrinsicID() != Intrinsic::lifetime_end) 2034 return false; 2035 } else { 2036 return false; 2037 } 2038 2039 return true; 2040 } 2041 2042 /// Test whether the given alloca partition's integer operations can be 2043 /// widened to promotable ones. 2044 /// 2045 /// This is a quick test to check whether we can rewrite the integer loads and 2046 /// stores to a particular alloca into wider loads and stores and be able to 2047 /// promote the resulting alloca. 2048 static bool isIntegerWideningViable(Partition &P, Type *AllocaTy, 2049 const DataLayout &DL) { 2050 uint64_t SizeInBits = DL.getTypeSizeInBits(AllocaTy); 2051 // Don't create integer types larger than the maximum bitwidth. 2052 if (SizeInBits > IntegerType::MAX_INT_BITS) 2053 return false; 2054 2055 // Don't try to handle allocas with bit-padding. 
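// (An i1 alloca is one such case: its type size is 1 bit but its store size
// is 8 bits, so a widened integer would cover bits with no defined value.)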
2056 if (SizeInBits != DL.getTypeStoreSizeInBits(AllocaTy)) 2057 return false; 2058 2059 // We need to ensure that an integer type with the appropriate bitwidth can 2060 // be converted to the alloca type, whatever that is. We don't want to force 2061 // the alloca itself to have an integer type if there is a more suitable one. 2062 Type *IntTy = Type::getIntNTy(AllocaTy->getContext(), SizeInBits); 2063 if (!canConvertValue(DL, AllocaTy, IntTy) || 2064 !canConvertValue(DL, IntTy, AllocaTy)) 2065 return false; 2066 2067 // While examining uses, we ensure that the alloca has a covering load or 2068 // store. We don't want to widen the integer operations only to fail to 2069 // promote due to some other unsplittable entry (which we may make splittable 2070 // later). However, if there are only splittable uses, go ahead and assume 2071 // that we cover the alloca. 2072 // FIXME: We shouldn't consider split slices that happen to start in the 2073 // partition here... 2074 bool WholeAllocaOp = 2075 P.begin() != P.end() ? false : DL.isLegalInteger(SizeInBits); 2076 2077 for (const Slice &S : P) 2078 if (!isIntegerWideningViableForSlice(S, P.beginOffset(), AllocaTy, DL, 2079 WholeAllocaOp)) 2080 return false; 2081 2082 for (const Slice *S : P.splitSliceTails()) 2083 if (!isIntegerWideningViableForSlice(*S, P.beginOffset(), AllocaTy, DL, 2084 WholeAllocaOp)) 2085 return false; 2086 2087 return WholeAllocaOp; 2088 } 2089 2090 static Value *extractInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *V, 2091 IntegerType *Ty, uint64_t Offset, 2092 const Twine &Name) { 2093 LLVM_DEBUG(dbgs() << " start: " << *V << "\n"); 2094 IntegerType *IntTy = cast<IntegerType>(V->getType()); 2095 assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) && 2096 "Element extends past full value"); 2097 uint64_t ShAmt = 8 * Offset; 2098 if (DL.isBigEndian()) 2099 ShAmt = 8 * (DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset); 2100 if (ShAmt) { 2101 V = IRB.CreateLShr(V, ShAmt, Name + ".shift"); 2102 LLVM_DEBUG(dbgs() << " shifted: " << *V << "\n"); 2103 } 2104 assert(Ty->getBitWidth() <= IntTy->getBitWidth() && 2105 "Cannot extract to a larger integer!"); 2106 if (Ty != IntTy) { 2107 V = IRB.CreateTrunc(V, Ty, Name + ".trunc"); 2108 LLVM_DEBUG(dbgs() << " trunced: " << *V << "\n"); 2109 } 2110 return V; 2111 } 2112 2113 static Value *insertInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *Old, 2114 Value *V, uint64_t Offset, const Twine &Name) { 2115 IntegerType *IntTy = cast<IntegerType>(Old->getType()); 2116 IntegerType *Ty = cast<IntegerType>(V->getType()); 2117 assert(Ty->getBitWidth() <= IntTy->getBitWidth() && 2118 "Cannot insert a larger integer!"); 2119 LLVM_DEBUG(dbgs() << " start: " << *V << "\n"); 2120 if (Ty != IntTy) { 2121 V = IRB.CreateZExt(V, IntTy, Name + ".ext"); 2122 LLVM_DEBUG(dbgs() << " extended: " << *V << "\n"); 2123 } 2124 assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) && 2125 "Element store outside of alloca store"); 2126 uint64_t ShAmt = 8 * Offset; 2127 if (DL.isBigEndian()) 2128 ShAmt = 8 * (DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset); 2129 if (ShAmt) { 2130 V = IRB.CreateShl(V, ShAmt, Name + ".shift"); 2131 LLVM_DEBUG(dbgs() << " shifted: " << *V << "\n"); 2132 } 2133 2134 if (ShAmt || Ty->getBitWidth() < IntTy->getBitWidth()) { 2135 APInt Mask = ~Ty->getMask().zext(IntTy->getBitWidth()).shl(ShAmt); 2136 Old = IRB.CreateAnd(Old, Mask, Name + ".mask"); 2137 LLVM_DEBUG(dbgs() << " masked: " << *Old << "\n"); 2138 V = 
IRB.CreateOr(Old, V, Name + ".insert");
2139 LLVM_DEBUG(dbgs() << " inserted: " << *V << "\n");
2140 }
2141 return V;
2142 }
2143 
2144 static Value *extractVector(IRBuilderTy &IRB, Value *V, unsigned BeginIndex,
2145 unsigned EndIndex, const Twine &Name) {
2146 VectorType *VecTy = cast<VectorType>(V->getType());
2147 unsigned NumElements = EndIndex - BeginIndex;
2148 assert(NumElements <= VecTy->getNumElements() && "Too many elements!");
2149 
2150 if (NumElements == VecTy->getNumElements())
2151 return V;
2152 
2153 if (NumElements == 1) {
2154 V = IRB.CreateExtractElement(V, IRB.getInt32(BeginIndex),
2155 Name + ".extract");
2156 LLVM_DEBUG(dbgs() << " extract: " << *V << "\n");
2157 return V;
2158 }
2159 
2160 SmallVector<Constant *, 8> Mask;
2161 Mask.reserve(NumElements);
2162 for (unsigned i = BeginIndex; i != EndIndex; ++i)
2163 Mask.push_back(IRB.getInt32(i));
2164 V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()),
2165 ConstantVector::get(Mask), Name + ".extract");
2166 LLVM_DEBUG(dbgs() << " shuffle: " << *V << "\n");
2167 return V;
2168 }
2169 
2170 static Value *insertVector(IRBuilderTy &IRB, Value *Old, Value *V,
2171 unsigned BeginIndex, const Twine &Name) {
2172 VectorType *VecTy = cast<VectorType>(Old->getType());
2173 assert(VecTy && "Can only insert a vector into a vector");
2174 
2175 VectorType *Ty = dyn_cast<VectorType>(V->getType());
2176 if (!Ty) {
2177 // Single element to insert.
2178 V = IRB.CreateInsertElement(Old, V, IRB.getInt32(BeginIndex),
2179 Name + ".insert");
2180 LLVM_DEBUG(dbgs() << " insert: " << *V << "\n");
2181 return V;
2182 }
2183 
2184 assert(Ty->getNumElements() <= VecTy->getNumElements() &&
2185 "Too many elements!");
2186 if (Ty->getNumElements() == VecTy->getNumElements()) {
2187 assert(V->getType() == VecTy && "Vector type mismatch");
2188 return V;
2189 }
2190 unsigned EndIndex = BeginIndex + Ty->getNumElements();
2191 
2192 // When inserting a smaller vector into the larger one to store, we first
2193 // use a shuffle vector to widen it with undef elements, and then
2194 // a second shuffle vector to select between the loaded vector and the
2195 // incoming vector.
2196 SmallVector<Constant *, 8> Mask;
2197 Mask.reserve(VecTy->getNumElements());
2198 for (unsigned i = 0; i != VecTy->getNumElements(); ++i)
2199 if (i >= BeginIndex && i < EndIndex)
2200 Mask.push_back(IRB.getInt32(i - BeginIndex));
2201 else
2202 Mask.push_back(UndefValue::get(IRB.getInt32Ty()));
2203 V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()),
2204 ConstantVector::get(Mask), Name + ".expand");
2205 LLVM_DEBUG(dbgs() << " shuffle: " << *V << "\n");
2206 
2207 Mask.clear();
2208 for (unsigned i = 0; i != VecTy->getNumElements(); ++i)
2209 Mask.push_back(IRB.getInt1(i >= BeginIndex && i < EndIndex));
2210 
2211 V = IRB.CreateSelect(ConstantVector::get(Mask), V, Old, Name + ".blend");
2212 
2213 LLVM_DEBUG(dbgs() << " blend: " << *V << "\n");
2214 return V;
2215 }
2216 
2217 /// Visitor to rewrite instructions using a particular slice of an alloca
2218 /// to use a new alloca.
2219 ///
2220 /// Also implements the rewriting to vector-based accesses when the partition
2221 /// passes the isVectorPromotionViable predicate. Most of the rewriting logic
2222 /// lives here.
2223 class llvm::sroa::AllocaSliceRewriter
2224 : public InstVisitor<AllocaSliceRewriter, bool> {
2225 // Befriend the base class so it can delegate to private visit methods.
2226 friend class InstVisitor<AllocaSliceRewriter, bool>; 2227 2228 using Base = InstVisitor<AllocaSliceRewriter, bool>; 2229 2230 const DataLayout &DL; 2231 AllocaSlices &AS; 2232 SROA &Pass; 2233 AllocaInst &OldAI, &NewAI; 2234 const uint64_t NewAllocaBeginOffset, NewAllocaEndOffset; 2235 Type *NewAllocaTy; 2236 2237 // This is a convenience and flag variable that will be null unless the new 2238 // alloca's integer operations should be widened to this integer type due to 2239 // passing isIntegerWideningViable above. If it is non-null, the desired 2240 // integer type will be stored here for easy access during rewriting. 2241 IntegerType *IntTy; 2242 2243 // If we are rewriting an alloca partition which can be written as pure 2244 // vector operations, we stash extra information here. When VecTy is 2245 // non-null, we have some strict guarantees about the rewritten alloca: 2246 // - The new alloca is exactly the size of the vector type here. 2247 // - The accesses all either map to the entire vector or to a single 2248 // element. 2249 // - The set of accessing instructions is only one of those handled above 2250 // in isVectorPromotionViable. Generally these are the same access kinds 2251 // which are promotable via mem2reg. 2252 VectorType *VecTy; 2253 Type *ElementTy; 2254 uint64_t ElementSize; 2255 2256 // The original offset of the slice currently being rewritten relative to 2257 // the original alloca. 2258 uint64_t BeginOffset = 0; 2259 uint64_t EndOffset = 0; 2260 2261 // The new offsets of the slice currently being rewritten relative to the 2262 // original alloca. 2263 uint64_t NewBeginOffset, NewEndOffset; 2264 2265 uint64_t SliceSize; 2266 bool IsSplittable = false; 2267 bool IsSplit = false; 2268 Use *OldUse = nullptr; 2269 Instruction *OldPtr = nullptr; 2270 2271 // Track post-rewrite users which are PHI nodes and Selects. 2272 SmallSetVector<PHINode *, 8> &PHIUsers; 2273 SmallSetVector<SelectInst *, 8> &SelectUsers; 2274 2275 // Utility IR builder, whose name prefix is setup for each visited use, and 2276 // the insertion point is set to point to the user. 2277 IRBuilderTy IRB; 2278 2279 public: 2280 AllocaSliceRewriter(const DataLayout &DL, AllocaSlices &AS, SROA &Pass, 2281 AllocaInst &OldAI, AllocaInst &NewAI, 2282 uint64_t NewAllocaBeginOffset, 2283 uint64_t NewAllocaEndOffset, bool IsIntegerPromotable, 2284 VectorType *PromotableVecTy, 2285 SmallSetVector<PHINode *, 8> &PHIUsers, 2286 SmallSetVector<SelectInst *, 8> &SelectUsers) 2287 : DL(DL), AS(AS), Pass(Pass), OldAI(OldAI), NewAI(NewAI), 2288 NewAllocaBeginOffset(NewAllocaBeginOffset), 2289 NewAllocaEndOffset(NewAllocaEndOffset), 2290 NewAllocaTy(NewAI.getAllocatedType()), 2291 IntTy(IsIntegerPromotable 2292 ? Type::getIntNTy( 2293 NewAI.getContext(), 2294 DL.getTypeSizeInBits(NewAI.getAllocatedType())) 2295 : nullptr), 2296 VecTy(PromotableVecTy), 2297 ElementTy(VecTy ? VecTy->getElementType() : nullptr), 2298 ElementSize(VecTy ? 
DL.getTypeSizeInBits(ElementTy) / 8 : 0), 2299 PHIUsers(PHIUsers), SelectUsers(SelectUsers), 2300 IRB(NewAI.getContext(), ConstantFolder()) { 2301 if (VecTy) { 2302 assert((DL.getTypeSizeInBits(ElementTy) % 8) == 0 && 2303 "Only multiple-of-8 sized vector elements are viable"); 2304 ++NumVectorized; 2305 } 2306 assert((!IntTy && !VecTy) || (IntTy && !VecTy) || (!IntTy && VecTy)); 2307 } 2308 2309 bool visit(AllocaSlices::const_iterator I) { 2310 bool CanSROA = true; 2311 BeginOffset = I->beginOffset(); 2312 EndOffset = I->endOffset(); 2313 IsSplittable = I->isSplittable(); 2314 IsSplit = 2315 BeginOffset < NewAllocaBeginOffset || EndOffset > NewAllocaEndOffset; 2316 LLVM_DEBUG(dbgs() << " rewriting " << (IsSplit ? "split " : "")); 2317 LLVM_DEBUG(AS.printSlice(dbgs(), I, "")); 2318 LLVM_DEBUG(dbgs() << "\n"); 2319 2320 // Compute the intersecting offset range. 2321 assert(BeginOffset < NewAllocaEndOffset); 2322 assert(EndOffset > NewAllocaBeginOffset); 2323 NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset); 2324 NewEndOffset = std::min(EndOffset, NewAllocaEndOffset); 2325 2326 SliceSize = NewEndOffset - NewBeginOffset; 2327 2328 OldUse = I->getUse(); 2329 OldPtr = cast<Instruction>(OldUse->get()); 2330 2331 Instruction *OldUserI = cast<Instruction>(OldUse->getUser()); 2332 IRB.SetInsertPoint(OldUserI); 2333 IRB.SetCurrentDebugLocation(OldUserI->getDebugLoc()); 2334 IRB.SetNamePrefix(Twine(NewAI.getName()) + "." + Twine(BeginOffset) + "."); 2335 2336 CanSROA &= visit(cast<Instruction>(OldUse->getUser())); 2337 if (VecTy || IntTy) 2338 assert(CanSROA); 2339 return CanSROA; 2340 } 2341 2342 private: 2343 // Make sure the other visit overloads are visible. 2344 using Base::visit; 2345 2346 // Every instruction which can end up as a user must have a rewrite rule. 2347 bool visitInstruction(Instruction &I) { 2348 LLVM_DEBUG(dbgs() << " !!!! Cannot rewrite: " << I << "\n"); 2349 llvm_unreachable("No rewrite rule for this instruction!"); 2350 } 2351 2352 Value *getNewAllocaSlicePtr(IRBuilderTy &IRB, Type *PointerTy) { 2353 // Note that the offset computation can use BeginOffset or NewBeginOffset 2354 // interchangeably for unsplit slices. 2355 assert(IsSplit || BeginOffset == NewBeginOffset); 2356 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; 2357 2358 #ifndef NDEBUG 2359 StringRef OldName = OldPtr->getName(); 2360 // Skip through the last '.sroa.' component of the name. 2361 size_t LastSROAPrefix = OldName.rfind(".sroa."); 2362 if (LastSROAPrefix != StringRef::npos) { 2363 OldName = OldName.substr(LastSROAPrefix + strlen(".sroa.")); 2364 // Look for an SROA slice index. 2365 size_t IndexEnd = OldName.find_first_not_of("0123456789"); 2366 if (IndexEnd != StringRef::npos && OldName[IndexEnd] == '.') { 2367 // Strip the index and look for the offset. 2368 OldName = OldName.substr(IndexEnd + 1); 2369 size_t OffsetEnd = OldName.find_first_not_of("0123456789"); 2370 if (OffsetEnd != StringRef::npos && OldName[OffsetEnd] == '.') 2371 // Strip the offset. 2372 OldName = OldName.substr(OffsetEnd + 1); 2373 } 2374 } 2375 // Strip any SROA suffixes as well. 2376 OldName = OldName.substr(0, OldName.find(".sroa_")); 2377 #endif 2378 2379 return getAdjustedPtr(IRB, DL, &NewAI, 2380 APInt(DL.getIndexTypeSizeInBits(PointerTy), Offset), 2381 PointerTy, 2382 #ifndef NDEBUG 2383 Twine(OldName) + "." 2384 #else 2385 Twine() 2386 #endif 2387 ); 2388 } 2389 2390 /// Compute suitable alignment to access this slice of the *new* 2391 /// alloca. 
2392 /// 2393 /// You can optionally pass a type to this routine and if that type's ABI 2394 /// alignment is itself suitable, this will return zero. 2395 unsigned getSliceAlign(Type *Ty = nullptr) { 2396 unsigned NewAIAlign = NewAI.getAlignment(); 2397 if (!NewAIAlign) 2398 NewAIAlign = DL.getABITypeAlignment(NewAI.getAllocatedType()); 2399 unsigned Align = 2400 MinAlign(NewAIAlign, NewBeginOffset - NewAllocaBeginOffset); 2401 return (Ty && Align == DL.getABITypeAlignment(Ty)) ? 0 : Align; 2402 } 2403 2404 unsigned getIndex(uint64_t Offset) { 2405 assert(VecTy && "Can only call getIndex when rewriting a vector"); 2406 uint64_t RelOffset = Offset - NewAllocaBeginOffset; 2407 assert(RelOffset / ElementSize < UINT32_MAX && "Index out of bounds"); 2408 uint32_t Index = RelOffset / ElementSize; 2409 assert(Index * ElementSize == RelOffset); 2410 return Index; 2411 } 2412 2413 void deleteIfTriviallyDead(Value *V) { 2414 Instruction *I = cast<Instruction>(V); 2415 if (isInstructionTriviallyDead(I)) 2416 Pass.DeadInsts.insert(I); 2417 } 2418 2419 Value *rewriteVectorizedLoadInst() { 2420 unsigned BeginIndex = getIndex(NewBeginOffset); 2421 unsigned EndIndex = getIndex(NewEndOffset); 2422 assert(EndIndex > BeginIndex && "Empty vector!"); 2423 2424 Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load"); 2425 return extractVector(IRB, V, BeginIndex, EndIndex, "vec"); 2426 } 2427 2428 Value *rewriteIntegerLoad(LoadInst &LI) { 2429 assert(IntTy && "We cannot insert an integer to the alloca"); 2430 assert(!LI.isVolatile()); 2431 Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load"); 2432 V = convertValue(DL, IRB, V, IntTy); 2433 assert(NewBeginOffset >= NewAllocaBeginOffset && "Out of bounds offset"); 2434 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; 2435 if (Offset > 0 || NewEndOffset < NewAllocaEndOffset) { 2436 IntegerType *ExtractTy = Type::getIntNTy(LI.getContext(), SliceSize * 8); 2437 V = extractInteger(DL, IRB, V, ExtractTy, Offset, "extract"); 2438 } 2439 // It is possible that the extracted type is not the load type. This 2440 // happens if there is a load past the end of the alloca, and as 2441 // a consequence the slice is narrower but still a candidate for integer 2442 // lowering. To handle this case, we just zero extend the extracted 2443 // integer. 2444 assert(cast<IntegerType>(LI.getType())->getBitWidth() >= SliceSize * 8 && 2445 "Can only handle an extract for an overly wide load"); 2446 if (cast<IntegerType>(LI.getType())->getBitWidth() > SliceSize * 8) 2447 V = IRB.CreateZExt(V, LI.getType()); 2448 return V; 2449 } 2450 2451 bool visitLoadInst(LoadInst &LI) { 2452 LLVM_DEBUG(dbgs() << " original: " << LI << "\n"); 2453 Value *OldOp = LI.getOperand(0); 2454 assert(OldOp == OldPtr); 2455 2456 AAMDNodes AATags; 2457 LI.getAAMetadata(AATags); 2458 2459 unsigned AS = LI.getPointerAddressSpace(); 2460 2461 Type *TargetTy = IsSplit ? 
Type::getIntNTy(LI.getContext(), SliceSize * 8)
2462 : LI.getType();
2463 const bool IsLoadPastEnd = DL.getTypeStoreSize(TargetTy) > SliceSize;
2464 bool IsPtrAdjusted = false;
2465 Value *V;
2466 if (VecTy) {
2467 V = rewriteVectorizedLoadInst();
2468 } else if (IntTy && LI.getType()->isIntegerTy()) {
2469 V = rewriteIntegerLoad(LI);
2470 } else if (NewBeginOffset == NewAllocaBeginOffset &&
2471 NewEndOffset == NewAllocaEndOffset &&
2472 (canConvertValue(DL, NewAllocaTy, TargetTy) ||
2473 (IsLoadPastEnd && NewAllocaTy->isIntegerTy() &&
2474 TargetTy->isIntegerTy()))) {
2475 LoadInst *NewLI = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
2476 LI.isVolatile(), LI.getName());
2477 if (AATags)
2478 NewLI->setAAMetadata(AATags);
2479 if (LI.isVolatile())
2480 NewLI->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
2481 
2482 // Any !nonnull metadata or !range metadata on the old load is also valid
2483 // on the new load. This is true in some cases even when the loads
2484 // are different types, for example by mapping !nonnull metadata to
2485 // !range metadata by modeling the null pointer constant converted to the
2486 // integer type.
2487 // FIXME: Add support for range metadata here. Currently the utilities
2488 // for this don't propagate range metadata in trivial cases from one
2489 // integer load to another, don't handle non-addrspace-0 null pointers
2490 // correctly, and don't have any support for mapping ranges as the
2491 // integer type becomes wider or narrower.
2492 if (MDNode *N = LI.getMetadata(LLVMContext::MD_nonnull))
2493 copyNonnullMetadata(LI, N, *NewLI);
2494 
2495 
2496 V = NewLI;
2497 
2498 // If this is an integer load past the end of the slice (which means the
2499 // bytes outside the slice are undef or this load is dead) just forcibly
2500 // fix the integer size with correct handling of endianness.
2501 if (auto *AITy = dyn_cast<IntegerType>(NewAllocaTy))
2502 if (auto *TITy = dyn_cast<IntegerType>(TargetTy))
2503 if (AITy->getBitWidth() < TITy->getBitWidth()) {
2504 V = IRB.CreateZExt(V, TITy, "load.ext");
2505 if (DL.isBigEndian())
2506 V = IRB.CreateShl(V, TITy->getBitWidth() - AITy->getBitWidth(),
2507 "endian_shift");
2508 }
2509 } else {
2510 Type *LTy = TargetTy->getPointerTo(AS);
2511 LoadInst *NewLI = IRB.CreateAlignedLoad(getNewAllocaSlicePtr(IRB, LTy),
2512 getSliceAlign(TargetTy),
2513 LI.isVolatile(), LI.getName());
2514 if (AATags)
2515 NewLI->setAAMetadata(AATags);
2516 if (LI.isVolatile())
2517 NewLI->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
2518 
2519 V = NewLI;
2520 IsPtrAdjusted = true;
2521 }
2522 V = convertValue(DL, IRB, V, TargetTy);
2523 
2524 if (IsSplit) {
2525 assert(!LI.isVolatile());
2526 assert(LI.getType()->isIntegerTy() &&
2527 "Only integer type loads and stores are split");
2528 assert(SliceSize < DL.getTypeStoreSize(LI.getType()) &&
2529 "Split load isn't smaller than original load");
2530 assert(LI.getType()->getIntegerBitWidth() ==
2531 DL.getTypeStoreSizeInBits(LI.getType()) &&
2532 "Non-byte-multiple bit width");
2533 // Move the insertion point just past the load so that we can refer to it.
2534 IRB.SetInsertPoint(&*std::next(BasicBlock::iterator(&LI)));
2535 // Create a placeholder value with the same type as LI to use as the
2536 // basis for the new value. This allows us to replace the uses of LI with
2537 // the computed value, and then replace the placeholder with LI, leaving
2538 // LI only used for this computation.
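// A sketch of the dance, with illustrative names: for an i32 load %li split
// to a 16-bit slice, a placeholder %ph of %li's type stands in while the
// wide value is rebuilt via insertInteger, uses of %li are redirected to
// the rebuilt value, and %ph is then replaced by %li itself and deleted.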
2539 Value *Placeholder = 2540 new LoadInst(UndefValue::get(LI.getType()->getPointerTo(AS))); 2541 V = insertInteger(DL, IRB, Placeholder, V, NewBeginOffset - BeginOffset, 2542 "insert"); 2543 LI.replaceAllUsesWith(V); 2544 Placeholder->replaceAllUsesWith(&LI); 2545 Placeholder->deleteValue(); 2546 } else { 2547 LI.replaceAllUsesWith(V); 2548 } 2549 2550 Pass.DeadInsts.insert(&LI); 2551 deleteIfTriviallyDead(OldOp); 2552 LLVM_DEBUG(dbgs() << " to: " << *V << "\n"); 2553 return !LI.isVolatile() && !IsPtrAdjusted; 2554 } 2555 2556 bool rewriteVectorizedStoreInst(Value *V, StoreInst &SI, Value *OldOp, 2557 AAMDNodes AATags) { 2558 if (V->getType() != VecTy) { 2559 unsigned BeginIndex = getIndex(NewBeginOffset); 2560 unsigned EndIndex = getIndex(NewEndOffset); 2561 assert(EndIndex > BeginIndex && "Empty vector!"); 2562 unsigned NumElements = EndIndex - BeginIndex; 2563 assert(NumElements <= VecTy->getNumElements() && "Too many elements!"); 2564 Type *SliceTy = (NumElements == 1) 2565 ? ElementTy 2566 : VectorType::get(ElementTy, NumElements); 2567 if (V->getType() != SliceTy) 2568 V = convertValue(DL, IRB, V, SliceTy); 2569 2570 // Mix in the existing elements. 2571 Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load"); 2572 V = insertVector(IRB, Old, V, BeginIndex, "vec"); 2573 } 2574 StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment()); 2575 if (AATags) 2576 Store->setAAMetadata(AATags); 2577 Pass.DeadInsts.insert(&SI); 2578 2579 LLVM_DEBUG(dbgs() << " to: " << *Store << "\n"); 2580 return true; 2581 } 2582 2583 bool rewriteIntegerStore(Value *V, StoreInst &SI, AAMDNodes AATags) { 2584 assert(IntTy && "We cannot extract an integer from the alloca"); 2585 assert(!SI.isVolatile()); 2586 if (DL.getTypeSizeInBits(V->getType()) != IntTy->getBitWidth()) { 2587 Value *Old = 2588 IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload"); 2589 Old = convertValue(DL, IRB, Old, IntTy); 2590 assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset"); 2591 uint64_t Offset = BeginOffset - NewAllocaBeginOffset; 2592 V = insertInteger(DL, IRB, Old, SI.getValueOperand(), Offset, "insert"); 2593 } 2594 V = convertValue(DL, IRB, V, NewAllocaTy); 2595 StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment()); 2596 Store->copyMetadata(SI, LLVMContext::MD_mem_parallel_loop_access); 2597 if (AATags) 2598 Store->setAAMetadata(AATags); 2599 Pass.DeadInsts.insert(&SI); 2600 LLVM_DEBUG(dbgs() << " to: " << *Store << "\n"); 2601 return true; 2602 } 2603 2604 bool visitStoreInst(StoreInst &SI) { 2605 LLVM_DEBUG(dbgs() << " original: " << SI << "\n"); 2606 Value *OldOp = SI.getOperand(1); 2607 assert(OldOp == OldPtr); 2608 2609 AAMDNodes AATags; 2610 SI.getAAMetadata(AATags); 2611 2612 Value *V = SI.getValueOperand(); 2613 2614 // Strip all inbounds GEPs and pointer casts to try to dig out any root 2615 // alloca that should be re-examined after promoting this alloca. 
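// (e.g. if the stored value is "getelementptr inbounds %struct.S,
// %struct.S* %a, i32 0, i32 1" for some alloca %a, stripping the inbounds
// offsets recovers %a itself so it can be revisited.)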
2616 if (V->getType()->isPointerTy()) 2617 if (AllocaInst *AI = dyn_cast<AllocaInst>(V->stripInBoundsOffsets())) 2618 Pass.PostPromotionWorklist.insert(AI); 2619 2620 if (SliceSize < DL.getTypeStoreSize(V->getType())) { 2621 assert(!SI.isVolatile()); 2622 assert(V->getType()->isIntegerTy() && 2623 "Only integer type loads and stores are split"); 2624 assert(V->getType()->getIntegerBitWidth() == 2625 DL.getTypeStoreSizeInBits(V->getType()) && 2626 "Non-byte-multiple bit width"); 2627 IntegerType *NarrowTy = Type::getIntNTy(SI.getContext(), SliceSize * 8); 2628 V = extractInteger(DL, IRB, V, NarrowTy, NewBeginOffset - BeginOffset, 2629 "extract"); 2630 } 2631 2632 if (VecTy) 2633 return rewriteVectorizedStoreInst(V, SI, OldOp, AATags); 2634 if (IntTy && V->getType()->isIntegerTy()) 2635 return rewriteIntegerStore(V, SI, AATags); 2636 2637 const bool IsStorePastEnd = DL.getTypeStoreSize(V->getType()) > SliceSize; 2638 StoreInst *NewSI; 2639 if (NewBeginOffset == NewAllocaBeginOffset && 2640 NewEndOffset == NewAllocaEndOffset && 2641 (canConvertValue(DL, V->getType(), NewAllocaTy) || 2642 (IsStorePastEnd && NewAllocaTy->isIntegerTy() && 2643 V->getType()->isIntegerTy()))) { 2644 // If this is an integer store past the end of slice (and thus the bytes 2645 // past that point are irrelevant or this is unreachable), truncate the 2646 // value prior to storing. 2647 if (auto *VITy = dyn_cast<IntegerType>(V->getType())) 2648 if (auto *AITy = dyn_cast<IntegerType>(NewAllocaTy)) 2649 if (VITy->getBitWidth() > AITy->getBitWidth()) { 2650 if (DL.isBigEndian()) 2651 V = IRB.CreateLShr(V, VITy->getBitWidth() - AITy->getBitWidth(), 2652 "endian_shift"); 2653 V = IRB.CreateTrunc(V, AITy, "load.trunc"); 2654 } 2655 2656 V = convertValue(DL, IRB, V, NewAllocaTy); 2657 NewSI = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(), 2658 SI.isVolatile()); 2659 } else { 2660 unsigned AS = SI.getPointerAddressSpace(); 2661 Value *NewPtr = getNewAllocaSlicePtr(IRB, V->getType()->getPointerTo(AS)); 2662 NewSI = IRB.CreateAlignedStore(V, NewPtr, getSliceAlign(V->getType()), 2663 SI.isVolatile()); 2664 } 2665 NewSI->copyMetadata(SI, LLVMContext::MD_mem_parallel_loop_access); 2666 if (AATags) 2667 NewSI->setAAMetadata(AATags); 2668 if (SI.isVolatile()) 2669 NewSI->setAtomic(SI.getOrdering(), SI.getSyncScopeID()); 2670 Pass.DeadInsts.insert(&SI); 2671 deleteIfTriviallyDead(OldOp); 2672 2673 LLVM_DEBUG(dbgs() << " to: " << *NewSI << "\n"); 2674 return NewSI->getPointerOperand() == &NewAI && !SI.isVolatile(); 2675 } 2676 2677 /// Compute an integer value from splatting an i8 across the given 2678 /// number of bytes. 2679 /// 2680 /// Note that this routine assumes an i8 is a byte. If that isn't true, don't 2681 /// call this routine. 2682 /// FIXME: Heed the advice above. 2683 /// 2684 /// \param V The i8 value to splat. 
2685 /// \param Size The number of bytes in the output (assuming i8 is one byte) 2686 Value *getIntegerSplat(Value *V, unsigned Size) { 2687 assert(Size > 0 && "Expected a positive number of bytes."); 2688 IntegerType *VTy = cast<IntegerType>(V->getType()); 2689 assert(VTy->getBitWidth() == 8 && "Expected an i8 value for the byte"); 2690 if (Size == 1) 2691 return V; 2692 2693 Type *SplatIntTy = Type::getIntNTy(VTy->getContext(), Size * 8); 2694 V = IRB.CreateMul( 2695 IRB.CreateZExt(V, SplatIntTy, "zext"), 2696 ConstantExpr::getUDiv( 2697 Constant::getAllOnesValue(SplatIntTy), 2698 ConstantExpr::getZExt(Constant::getAllOnesValue(V->getType()), 2699 SplatIntTy)), 2700 "isplat"); 2701 return V; 2702 } 2703 2704 /// Compute a vector splat for a given element value. 2705 Value *getVectorSplat(Value *V, unsigned NumElements) { 2706 V = IRB.CreateVectorSplat(NumElements, V, "vsplat"); 2707 LLVM_DEBUG(dbgs() << " splat: " << *V << "\n"); 2708 return V; 2709 } 2710 2711 bool visitMemSetInst(MemSetInst &II) { 2712 LLVM_DEBUG(dbgs() << " original: " << II << "\n"); 2713 assert(II.getRawDest() == OldPtr); 2714 2715 AAMDNodes AATags; 2716 II.getAAMetadata(AATags); 2717 2718 // If the memset has a variable size, it cannot be split, just adjust the 2719 // pointer to the new alloca. 2720 if (!isa<Constant>(II.getLength())) { 2721 assert(!IsSplit); 2722 assert(NewBeginOffset == BeginOffset); 2723 II.setDest(getNewAllocaSlicePtr(IRB, OldPtr->getType())); 2724 II.setDestAlignment(getSliceAlign()); 2725 2726 deleteIfTriviallyDead(OldPtr); 2727 return false; 2728 } 2729 2730 // Record this instruction for deletion. 2731 Pass.DeadInsts.insert(&II); 2732 2733 Type *AllocaTy = NewAI.getAllocatedType(); 2734 Type *ScalarTy = AllocaTy->getScalarType(); 2735 2736 // If this doesn't map cleanly onto the alloca type, and that type isn't 2737 // a single value type, just emit a memset. 2738 if (!VecTy && !IntTy && 2739 (BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset || 2740 SliceSize != DL.getTypeStoreSize(AllocaTy) || 2741 !AllocaTy->isSingleValueType() || 2742 !DL.isLegalInteger(DL.getTypeSizeInBits(ScalarTy)) || 2743 DL.getTypeSizeInBits(ScalarTy) % 8 != 0)) { 2744 Type *SizeTy = II.getLength()->getType(); 2745 Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset); 2746 CallInst *New = IRB.CreateMemSet( 2747 getNewAllocaSlicePtr(IRB, OldPtr->getType()), II.getValue(), Size, 2748 getSliceAlign(), II.isVolatile()); 2749 if (AATags) 2750 New->setAAMetadata(AATags); 2751 LLVM_DEBUG(dbgs() << " to: " << *New << "\n"); 2752 return false; 2753 } 2754 2755 // If we can represent this as a simple value, we have to build the actual 2756 // value to store, which requires expanding the byte present in memset to 2757 // a sensible representation for the alloca type. This is essentially 2758 // splatting the byte to a sufficiently wide integer, splatting it across 2759 // any desired vector width, and bitcasting to the final type. 2760 Value *V; 2761 2762 if (VecTy) { 2763 // If this is a memset of a vectorized alloca, insert it. 
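// (Illustrative only: a memset byte of 0x2A into i32 lanes first becomes
// the integer splat 0x2A2A2A2A, which is then vector-splatted if the slice
// covers more than one lane and blended into the loaded vector below.)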
assert(ElementTy == ScalarTy);
2765 
2766 unsigned BeginIndex = getIndex(NewBeginOffset);
2767 unsigned EndIndex = getIndex(NewEndOffset);
2768 assert(EndIndex > BeginIndex && "Empty vector!");
2769 unsigned NumElements = EndIndex - BeginIndex;
2770 assert(NumElements <= VecTy->getNumElements() && "Too many elements!");
2771 
2772 Value *Splat =
2773 getIntegerSplat(II.getValue(), DL.getTypeSizeInBits(ElementTy) / 8);
2774 Splat = convertValue(DL, IRB, Splat, ElementTy);
2775 if (NumElements > 1)
2776 Splat = getVectorSplat(Splat, NumElements);
2777 
2778 Value *Old =
2779 IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload");
2780 V = insertVector(IRB, Old, Splat, BeginIndex, "vec");
2781 } else if (IntTy) {
2782 // If this is a memset on an alloca where we can widen stores, insert the
2783 // set integer.
2784 assert(!II.isVolatile());
2785 
2786 uint64_t Size = NewEndOffset - NewBeginOffset;
2787 V = getIntegerSplat(II.getValue(), Size);
2788 
2789 if (IntTy && (BeginOffset != NewAllocaBeginOffset ||
2790 EndOffset != NewAllocaEndOffset)) {
2791 Value *Old =
2792 IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload");
2793 Old = convertValue(DL, IRB, Old, IntTy);
2794 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
2795 V = insertInteger(DL, IRB, Old, V, Offset, "insert");
2796 } else {
2797 assert(V->getType() == IntTy &&
2798 "Wrong type for an alloca wide integer!");
2799 }
2800 V = convertValue(DL, IRB, V, AllocaTy);
2801 } else {
2802 // Established these invariants above.
2803 assert(NewBeginOffset == NewAllocaBeginOffset);
2804 assert(NewEndOffset == NewAllocaEndOffset);
2805 
2806 V = getIntegerSplat(II.getValue(), DL.getTypeSizeInBits(ScalarTy) / 8);
2807 if (VectorType *AllocaVecTy = dyn_cast<VectorType>(AllocaTy))
2808 V = getVectorSplat(V, AllocaVecTy->getNumElements());
2809 
2810 V = convertValue(DL, IRB, V, AllocaTy);
2811 }
2812 
2813 StoreInst *New = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(),
2814 II.isVolatile());
2815 if (AATags)
2816 New->setAAMetadata(AATags);
2817 LLVM_DEBUG(dbgs() << " to: " << *New << "\n");
2818 return !II.isVolatile();
2819 }
2820 
2821 bool visitMemTransferInst(MemTransferInst &II) {
2822 // Rewriting of memory transfer instructions can be a bit tricky. We break
2823 // them into two categories: split intrinsics and unsplit intrinsics.
2824 
2825 LLVM_DEBUG(dbgs() << " original: " << II << "\n");
2826 
2827 AAMDNodes AATags;
2828 II.getAAMetadata(AATags);
2829 
2830 bool IsDest = &II.getRawDestUse() == OldUse;
2831 assert((IsDest && II.getRawDest() == OldPtr) ||
2832 (!IsDest && II.getRawSource() == OldPtr));
2833 
2834 unsigned SliceAlign = getSliceAlign();
2835 
2836 // For unsplit intrinsics, we simply modify the source and destination
2837 // pointers in place. This isn't just an optimization, it is a matter of
2838 // correctness. With unsplit intrinsics we may be dealing with transfers
2839 // within a single alloca before SROA ran, or with transfers that have
2840 // a variable length. We may also be dealing with memmove instead of
2841 // memcpy, and so simply updating the pointers is necessary for us to
2842 // update both source and dest of a single call.
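// (Consider "memmove(%p, %p + 4, 8)" with both pointers derived from one
// alloca: rewriting the two operands of the single call in place preserves
// the overlap semantics that splitting into separate copies would lose.)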
2843 if (!IsSplittable) { 2844 Value *AdjustedPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); 2845 if (IsDest) { 2846 II.setDest(AdjustedPtr); 2847 II.setDestAlignment(SliceAlign); 2848 } 2849 else { 2850 II.setSource(AdjustedPtr); 2851 II.setSourceAlignment(SliceAlign); 2852 } 2853 2854 LLVM_DEBUG(dbgs() << " to: " << II << "\n"); 2855 deleteIfTriviallyDead(OldPtr); 2856 return false; 2857 } 2858 // For split transfer intrinsics we have an incredibly useful assurance: 2859 // the source and destination do not reside within the same alloca, and at 2860 // least one of them does not escape. This means that we can replace 2861 // memmove with memcpy, and we don't need to worry about all manner of 2862 // downsides to splitting and transforming the operations. 2863 2864 // If this doesn't map cleanly onto the alloca type, and that type isn't 2865 // a single value type, just emit a memcpy. 2866 bool EmitMemCpy = 2867 !VecTy && !IntTy && 2868 (BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset || 2869 SliceSize != DL.getTypeStoreSize(NewAI.getAllocatedType()) || 2870 !NewAI.getAllocatedType()->isSingleValueType()); 2871 2872 // If we're just going to emit a memcpy, the alloca hasn't changed, and the 2873 // size hasn't been shrunk based on analysis of the viable range, this is 2874 // a no-op. 2875 if (EmitMemCpy && &OldAI == &NewAI) { 2876 // Ensure the start lines up. 2877 assert(NewBeginOffset == BeginOffset); 2878 2879 // Rewrite the size as needed. 2880 if (NewEndOffset != EndOffset) 2881 II.setLength(ConstantInt::get(II.getLength()->getType(), 2882 NewEndOffset - NewBeginOffset)); 2883 return false; 2884 } 2885 // Record this instruction for deletion. 2886 Pass.DeadInsts.insert(&II); 2887 2888 // Strip all inbounds GEPs and pointer casts to try to dig out any root 2889 // alloca that should be re-examined after rewriting this instruction. 2890 Value *OtherPtr = IsDest ? II.getRawSource() : II.getRawDest(); 2891 if (AllocaInst *AI = 2892 dyn_cast<AllocaInst>(OtherPtr->stripInBoundsOffsets())) { 2893 assert(AI != &OldAI && AI != &NewAI && 2894 "Splittable transfers cannot reach the same alloca on both ends."); 2895 Pass.Worklist.insert(AI); 2896 } 2897 2898 Type *OtherPtrTy = OtherPtr->getType(); 2899 unsigned OtherAS = OtherPtrTy->getPointerAddressSpace(); 2900 2901 // Compute the relative offset for the other pointer within the transfer. 2902 unsigned OffsetWidth = DL.getIndexSizeInBits(OtherAS); 2903 APInt OtherOffset(OffsetWidth, NewBeginOffset - BeginOffset); 2904 unsigned OtherAlign = 2905 IsDest ? II.getSourceAlignment() : II.getDestAlignment(); 2906 OtherAlign = MinAlign(OtherAlign ? OtherAlign : 1, 2907 OtherOffset.zextOrTrunc(64).getZExtValue()); 2908 2909 if (EmitMemCpy) { 2910 // Compute the other pointer, folding as much as possible to produce 2911 // a single, simple GEP in most cases. 
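// (e.g. a byte offset of 8 into a { i64, i32 }* typically folds to
// "getelementptr { i64, i32 }, { i64, i32 }* %src, i64 0, i32 1" rather
// than a chain of casts and byte-wise GEPs.)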
2912 OtherPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy, 2913 OtherPtr->getName() + "."); 2914 2915 Value *OurPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); 2916 Type *SizeTy = II.getLength()->getType(); 2917 Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset); 2918 2919 Value *DestPtr, *SrcPtr; 2920 unsigned DestAlign, SrcAlign; 2921 // Note: IsDest is true iff we're copying into the new alloca slice 2922 if (IsDest) { 2923 DestPtr = OurPtr; 2924 DestAlign = SliceAlign; 2925 SrcPtr = OtherPtr; 2926 SrcAlign = OtherAlign; 2927 } else { 2928 DestPtr = OtherPtr; 2929 DestAlign = OtherAlign; 2930 SrcPtr = OurPtr; 2931 SrcAlign = SliceAlign; 2932 } 2933 CallInst *New = IRB.CreateMemCpy(DestPtr, DestAlign, SrcPtr, SrcAlign, 2934 Size, II.isVolatile()); 2935 if (AATags) 2936 New->setAAMetadata(AATags); 2937 LLVM_DEBUG(dbgs() << " to: " << *New << "\n"); 2938 return false; 2939 } 2940 2941 bool IsWholeAlloca = NewBeginOffset == NewAllocaBeginOffset && 2942 NewEndOffset == NewAllocaEndOffset; 2943 uint64_t Size = NewEndOffset - NewBeginOffset; 2944 unsigned BeginIndex = VecTy ? getIndex(NewBeginOffset) : 0; 2945 unsigned EndIndex = VecTy ? getIndex(NewEndOffset) : 0; 2946 unsigned NumElements = EndIndex - BeginIndex; 2947 IntegerType *SubIntTy = 2948 IntTy ? Type::getIntNTy(IntTy->getContext(), Size * 8) : nullptr; 2949 2950 // Reset the other pointer type to match the register type we're going to 2951 // use, but using the address space of the original other pointer. 2952 if (VecTy && !IsWholeAlloca) { 2953 if (NumElements == 1) 2954 OtherPtrTy = VecTy->getElementType(); 2955 else 2956 OtherPtrTy = VectorType::get(VecTy->getElementType(), NumElements); 2957 2958 OtherPtrTy = OtherPtrTy->getPointerTo(OtherAS); 2959 } else if (IntTy && !IsWholeAlloca) { 2960 OtherPtrTy = SubIntTy->getPointerTo(OtherAS); 2961 } else { 2962 OtherPtrTy = NewAllocaTy->getPointerTo(OtherAS); 2963 } 2964 2965 Value *SrcPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy, 2966 OtherPtr->getName() + "."); 2967 unsigned SrcAlign = OtherAlign; 2968 Value *DstPtr = &NewAI; 2969 unsigned DstAlign = SliceAlign; 2970 if (!IsDest) { 2971 std::swap(SrcPtr, DstPtr); 2972 std::swap(SrcAlign, DstAlign); 2973 } 2974 2975 Value *Src; 2976 if (VecTy && !IsWholeAlloca && !IsDest) { 2977 Src = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load"); 2978 Src = extractVector(IRB, Src, BeginIndex, EndIndex, "vec"); 2979 } else if (IntTy && !IsWholeAlloca && !IsDest) { 2980 Src = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load"); 2981 Src = convertValue(DL, IRB, Src, IntTy); 2982 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; 2983 Src = extractInteger(DL, IRB, Src, SubIntTy, Offset, "extract"); 2984 } else { 2985 LoadInst *Load = IRB.CreateAlignedLoad(SrcPtr, SrcAlign, II.isVolatile(), 2986 "copyload"); 2987 if (AATags) 2988 Load->setAAMetadata(AATags); 2989 Src = Load; 2990 } 2991 2992 if (VecTy && !IsWholeAlloca && IsDest) { 2993 Value *Old = 2994 IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload"); 2995 Src = insertVector(IRB, Old, Src, BeginIndex, "vec"); 2996 } else if (IntTy && !IsWholeAlloca && IsDest) { 2997 Value *Old = 2998 IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload"); 2999 Old = convertValue(DL, IRB, Old, IntTy); 3000 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; 3001 Src = insertInteger(DL, IRB, Old, Src, Offset, "insert"); 3002 Src = convertValue(DL, IRB, Src, NewAllocaTy); 3003 } 3004 3005 StoreInst *Store = 
cast<StoreInst>( 3006 IRB.CreateAlignedStore(Src, DstPtr, DstAlign, II.isVolatile())); 3007 if (AATags) 3008 Store->setAAMetadata(AATags); 3009 LLVM_DEBUG(dbgs() << " to: " << *Store << "\n"); 3010 return !II.isVolatile(); 3011 } 3012 3013 bool visitIntrinsicInst(IntrinsicInst &II) { 3014 assert(II.getIntrinsicID() == Intrinsic::lifetime_start || 3015 II.getIntrinsicID() == Intrinsic::lifetime_end); 3016 LLVM_DEBUG(dbgs() << " original: " << II << "\n"); 3017 assert(II.getArgOperand(1) == OldPtr); 3018 3019 // Record this instruction for deletion. 3020 Pass.DeadInsts.insert(&II); 3021 3022 // Lifetime intrinsics are only promotable if they cover the whole alloca. 3023 // Therefore, we drop lifetime intrinsics which don't cover the whole 3024 // alloca. 3025 // (In theory, intrinsics which partially cover an alloca could be 3026 // promoted, but PromoteMemToReg doesn't handle that case.) 3027 // FIXME: Check whether the alloca is promotable before dropping the 3028 // lifetime intrinsics? 3029 if (NewBeginOffset != NewAllocaBeginOffset || 3030 NewEndOffset != NewAllocaEndOffset) 3031 return true; 3032 3033 ConstantInt *Size = 3034 ConstantInt::get(cast<IntegerType>(II.getArgOperand(0)->getType()), 3035 NewEndOffset - NewBeginOffset); 3036 Value *Ptr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); 3037 Value *New; 3038 if (II.getIntrinsicID() == Intrinsic::lifetime_start) 3039 New = IRB.CreateLifetimeStart(Ptr, Size); 3040 else 3041 New = IRB.CreateLifetimeEnd(Ptr, Size); 3042 3043 (void)New; 3044 LLVM_DEBUG(dbgs() << " to: " << *New << "\n"); 3045 3046 return true; 3047 } 3048 3049 void fixLoadStoreAlign(Instruction &Root) { 3050 // This algorithm implements the same visitor loop as 3051 // hasUnsafePHIOrSelectUse, and fixes the alignment of each load 3052 // or store found. 3053 SmallPtrSet<Instruction *, 4> Visited; 3054 SmallVector<Instruction *, 4> Uses; 3055 Visited.insert(&Root); 3056 Uses.push_back(&Root); 3057 do { 3058 Instruction *I = Uses.pop_back_val(); 3059 3060 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 3061 unsigned LoadAlign = LI->getAlignment(); 3062 if (!LoadAlign) 3063 LoadAlign = DL.getABITypeAlignment(LI->getType()); 3064 LI->setAlignment(std::min(LoadAlign, getSliceAlign())); 3065 continue; 3066 } 3067 if (StoreInst *SI = dyn_cast<StoreInst>(I)) { 3068 unsigned StoreAlign = SI->getAlignment(); 3069 if (!StoreAlign) { 3070 Value *Op = SI->getOperand(0); 3071 StoreAlign = DL.getABITypeAlignment(Op->getType()); 3072 } 3073 SI->setAlignment(std::min(StoreAlign, getSliceAlign())); 3074 continue; 3075 } 3076 3077 assert(isa<BitCastInst>(I) || isa<PHINode>(I) || 3078 isa<SelectInst>(I) || isa<GetElementPtrInst>(I)); 3079 for (User *U : I->users()) 3080 if (Visited.insert(cast<Instruction>(U)).second) 3081 Uses.push_back(cast<Instruction>(U)); 3082 } while (!Uses.empty()); 3083 } 3084 3085 bool visitPHINode(PHINode &PN) { 3086 LLVM_DEBUG(dbgs() << " original: " << PN << "\n"); 3087 assert(BeginOffset >= NewAllocaBeginOffset && "PHIs are unsplittable"); 3088 assert(EndOffset <= NewAllocaEndOffset && "PHIs are unsplittable"); 3089 3090 // We would like to compute a new pointer in only one place, but have it be 3091 // as local as possible to the PHI. To do that, we re-use the location of 3092 // the old pointer, which necessarily must be in the right position to 3093 // dominate the PHI. 
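// Illustrative IR (hypothetical names): for
//   %phi = phi i8* [ %old.ptr, %bb1 ], [ %other, %bb2 ]
// the replacement slice pointer is materialized at %old.ptr's own location
// (or at its block's first insertion point when %old.ptr is itself a PHI),
// so the new pointer still dominates the PHI it feeds.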
3094 IRBuilderTy PtrBuilder(IRB); 3095 if (isa<PHINode>(OldPtr)) 3096 PtrBuilder.SetInsertPoint(&*OldPtr->getParent()->getFirstInsertionPt()); 3097 else 3098 PtrBuilder.SetInsertPoint(OldPtr); 3099 PtrBuilder.SetCurrentDebugLocation(OldPtr->getDebugLoc()); 3100 3101 Value *NewPtr = getNewAllocaSlicePtr(PtrBuilder, OldPtr->getType()); 3102 // Replace the operands which were using the old pointer. 3103 std::replace(PN.op_begin(), PN.op_end(), cast<Value>(OldPtr), NewPtr); 3104 3105 LLVM_DEBUG(dbgs() << " to: " << PN << "\n"); 3106 deleteIfTriviallyDead(OldPtr); 3107 3108 // Fix the alignment of any loads or stores using this PHI node. 3109 fixLoadStoreAlign(PN); 3110 3111 // PHIs can't be promoted on their own, but often can be speculated. We 3112 // check the speculation outside of the rewriter so that we see the 3113 // fully-rewritten alloca. 3114 PHIUsers.insert(&PN); 3115 return true; 3116 } 3117 3118 bool visitSelectInst(SelectInst &SI) { 3119 LLVM_DEBUG(dbgs() << " original: " << SI << "\n"); 3120 assert((SI.getTrueValue() == OldPtr || SI.getFalseValue() == OldPtr) && 3121 "Pointer isn't an operand!"); 3122 assert(BeginOffset >= NewAllocaBeginOffset && "Selects are unsplittable"); 3123 assert(EndOffset <= NewAllocaEndOffset && "Selects are unsplittable"); 3124 3125 Value *NewPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); 3126 // Replace the operands which were using the old pointer. 3127 if (SI.getOperand(1) == OldPtr) 3128 SI.setOperand(1, NewPtr); 3129 if (SI.getOperand(2) == OldPtr) 3130 SI.setOperand(2, NewPtr); 3131 3132 LLVM_DEBUG(dbgs() << " to: " << SI << "\n"); 3133 deleteIfTriviallyDead(OldPtr); 3134 3135 // Fix the alignment of any loads or stores using this select. 3136 fixLoadStoreAlign(SI); 3137 3138 // Selects can't be promoted on their own, but often can be speculated. We 3139 // check the speculation outside of the rewriter so that we see the 3140 // fully-rewritten alloca. 3141 SelectUsers.insert(&SI); 3142 return true; 3143 } 3144 }; 3145 3146 namespace { 3147 3148 /// Visitor to rewrite aggregate loads and stores as scalar. 3149 /// 3150 /// This pass aggressively rewrites all aggregate loads and stores on 3151 /// a particular pointer (or any pointer derived from it which we can identify) 3152 /// with scalar loads and stores. 3153 class AggLoadStoreRewriter : public InstVisitor<AggLoadStoreRewriter, bool> { 3154 // Befriend the base class so it can delegate to private visit methods. 3155 friend class InstVisitor<AggLoadStoreRewriter, bool>; 3156 3157 /// Queue of pointer uses to analyze and potentially rewrite. 3158 SmallVector<Use *, 8> Queue; 3159 3160 /// Set to prevent us from cycling with phi nodes and loops. 3161 SmallPtrSet<User *, 8> Visited; 3162 3163 /// The current pointer use being rewritten. This is used to dig up the used 3164 /// value (as opposed to the user). 3165 Use *U; 3166 3167 public: 3168 /// Rewrite loads and stores through a pointer and all pointers derived from 3169 /// it. 3170 bool rewrite(Instruction &I) { 3171 LLVM_DEBUG(dbgs() << " Rewriting FCA loads and stores...\n"); 3172 enqueueUsers(I); 3173 bool Changed = false; 3174 while (!Queue.empty()) { 3175 U = Queue.pop_back_val(); 3176 Changed |= visit(cast<Instruction>(U->getUser())); 3177 } 3178 return Changed; 3179 } 3180 3181 private: 3182 /// Enqueue all the users of the given instruction for further processing. 3183 /// This uses a set to de-duplicate users. 
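/// For example, a select whose true and false operands are both the pointer
/// being rewritten is enqueued only once, since de-duplication is keyed on
/// the user rather than on the individual use.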
3184 void enqueueUsers(Instruction &I) {
3185 for (Use &U : I.uses())
3186 if (Visited.insert(U.getUser()).second)
3187 Queue.push_back(&U);
3188 }
3189
3190 // Conservative default is to not rewrite anything.
3191 bool visitInstruction(Instruction &I) { return false; }
3192
3193 /// Generic recursive split emission class.
3194 template <typename Derived> class OpSplitter {
3195 protected:
3196 /// The builder used to form new instructions.
3197 IRBuilderTy IRB;
3198
3199 /// The indices to be used with insert- or extractvalue to select the
3200 /// appropriate value within the aggregate.
3201 SmallVector<unsigned, 4> Indices;
3202
3203 /// The indices to a GEP instruction which will move Ptr to the correct slot
3204 /// within the aggregate.
3205 SmallVector<Value *, 4> GEPIndices;
3206
3207 /// The base pointer of the original op, used as a base for GEPing the
3208 /// split operations.
3209 Value *Ptr;
3210
3211 /// Initialize the splitter with an insertion point, Ptr and start with a
3212 /// single zero GEP index.
3213 OpSplitter(Instruction *InsertionPoint, Value *Ptr)
3214 : IRB(InsertionPoint), GEPIndices(1, IRB.getInt32(0)), Ptr(Ptr) {}
3215
3216 public:
3217 /// Generic recursive split emission routine.
3218 ///
3219 /// This method recursively splits an aggregate op (load or store) into
3220 /// scalar or vector ops. It splits recursively until it hits a single value
3221 /// and emits that single value operation via the template argument.
3222 ///
3223 /// The logic of this routine relies on GEPs and insertvalue and
3224 /// extractvalue all operating with the same fundamental index list, merely
3225 /// formatted differently (GEPs need actual values).
3226 ///
3227 /// \param Ty The type being split recursively into smaller ops.
3228 /// \param Agg The aggregate value being built up or stored, depending on
3229 /// whether this is splitting a load or a store respectively.
3230 void emitSplitOps(Type *Ty, Value *&Agg, const Twine &Name) {
3231 if (Ty->isSingleValueType())
3232 return static_cast<Derived *>(this)->emitFunc(Ty, Agg, Name);
3233
3234 if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
3235 unsigned OldSize = Indices.size();
3236 (void)OldSize;
3237 for (unsigned Idx = 0, Size = ATy->getNumElements(); Idx != Size;
3238 ++Idx) {
3239 assert(Indices.size() == OldSize && "Did not return to the old size");
3240 Indices.push_back(Idx);
3241 GEPIndices.push_back(IRB.getInt32(Idx));
3242 emitSplitOps(ATy->getElementType(), Agg, Name + "." + Twine(Idx));
3243 GEPIndices.pop_back();
3244 Indices.pop_back();
3245 }
3246 return;
3247 }
3248
3249 if (StructType *STy = dyn_cast<StructType>(Ty)) {
3250 unsigned OldSize = Indices.size();
3251 (void)OldSize;
3252 for (unsigned Idx = 0, Size = STy->getNumElements(); Idx != Size;
3253 ++Idx) {
3254 assert(Indices.size() == OldSize && "Did not return to the old size");
3255 Indices.push_back(Idx);
3256 GEPIndices.push_back(IRB.getInt32(Idx));
3257 emitSplitOps(STy->getElementType(Idx), Agg, Name + "." + Twine(Idx));
3258 GEPIndices.pop_back();
3259 Indices.pop_back();
3260 }
3261 return;
3262 }
3263
3264 llvm_unreachable("Only arrays and structs are aggregate loadable types");
3265 }
3266 };
3267
3268 struct LoadOpSplitter : public OpSplitter<LoadOpSplitter> {
3269 AAMDNodes AATags;
3270
3271 LoadOpSplitter(Instruction *InsertionPoint, Value *Ptr, AAMDNodes AATags)
3272 : OpSplitter<LoadOpSplitter>(InsertionPoint, Ptr), AATags(AATags) {}
3273
3274 /// Emit a leaf load of a single value.
This is called at the leaves of the 3275 /// recursive emission to actually load values. 3276 void emitFunc(Type *Ty, Value *&Agg, const Twine &Name) { 3277 assert(Ty->isSingleValueType()); 3278 // Load the single value and insert it using the indices. 3279 Value *GEP = 3280 IRB.CreateInBoundsGEP(nullptr, Ptr, GEPIndices, Name + ".gep"); 3281 LoadInst *Load = IRB.CreateLoad(GEP, Name + ".load"); 3282 if (AATags) 3283 Load->setAAMetadata(AATags); 3284 Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert"); 3285 LLVM_DEBUG(dbgs() << " to: " << *Load << "\n"); 3286 } 3287 }; 3288 3289 bool visitLoadInst(LoadInst &LI) { 3290 assert(LI.getPointerOperand() == *U); 3291 if (!LI.isSimple() || LI.getType()->isSingleValueType()) 3292 return false; 3293 3294 // We have an aggregate being loaded, split it apart. 3295 LLVM_DEBUG(dbgs() << " original: " << LI << "\n"); 3296 AAMDNodes AATags; 3297 LI.getAAMetadata(AATags); 3298 LoadOpSplitter Splitter(&LI, *U, AATags); 3299 Value *V = UndefValue::get(LI.getType()); 3300 Splitter.emitSplitOps(LI.getType(), V, LI.getName() + ".fca"); 3301 LI.replaceAllUsesWith(V); 3302 LI.eraseFromParent(); 3303 return true; 3304 } 3305 3306 struct StoreOpSplitter : public OpSplitter<StoreOpSplitter> { 3307 StoreOpSplitter(Instruction *InsertionPoint, Value *Ptr, AAMDNodes AATags) 3308 : OpSplitter<StoreOpSplitter>(InsertionPoint, Ptr), AATags(AATags) {} 3309 AAMDNodes AATags; 3310 3311 /// Emit a leaf store of a single value. This is called at the leaves of the 3312 /// recursive emission to actually produce stores. 3313 void emitFunc(Type *Ty, Value *&Agg, const Twine &Name) { 3314 assert(Ty->isSingleValueType()); 3315 // Extract the single value and store it using the indices. 3316 // 3317 // The gep and extractvalue values are factored out of the CreateStore 3318 // call to make the output independent of the argument evaluation order. 3319 Value *ExtractValue = 3320 IRB.CreateExtractValue(Agg, Indices, Name + ".extract"); 3321 Value *InBoundsGEP = 3322 IRB.CreateInBoundsGEP(nullptr, Ptr, GEPIndices, Name + ".gep"); 3323 StoreInst *Store = IRB.CreateStore(ExtractValue, InBoundsGEP); 3324 if (AATags) 3325 Store->setAAMetadata(AATags); 3326 LLVM_DEBUG(dbgs() << " to: " << *Store << "\n"); 3327 } 3328 }; 3329 3330 bool visitStoreInst(StoreInst &SI) { 3331 if (!SI.isSimple() || SI.getPointerOperand() != *U) 3332 return false; 3333 Value *V = SI.getValueOperand(); 3334 if (V->getType()->isSingleValueType()) 3335 return false; 3336 3337 // We have an aggregate being stored, split it apart. 3338 LLVM_DEBUG(dbgs() << " original: " << SI << "\n"); 3339 AAMDNodes AATags; 3340 SI.getAAMetadata(AATags); 3341 StoreOpSplitter Splitter(&SI, *U, AATags); 3342 Splitter.emitSplitOps(V->getType(), V, V->getName() + ".fca"); 3343 SI.eraseFromParent(); 3344 return true; 3345 } 3346 3347 bool visitBitCastInst(BitCastInst &BC) { 3348 enqueueUsers(BC); 3349 return false; 3350 } 3351 3352 bool visitGetElementPtrInst(GetElementPtrInst &GEPI) { 3353 enqueueUsers(GEPI); 3354 return false; 3355 } 3356 3357 bool visitPHINode(PHINode &PN) { 3358 enqueueUsers(PN); 3359 return false; 3360 } 3361 3362 bool visitSelectInst(SelectInst &SI) { 3363 enqueueUsers(SI); 3364 return false; 3365 } 3366 }; 3367 3368 } // end anonymous namespace 3369 3370 /// Strip aggregate type wrapping. 3371 /// 3372 /// This removes no-op aggregate types wrapping an underlying type. It will 3373 /// strip as many layers of types as it can without changing either the type 3374 /// size or the allocated size. 
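/// For example (assuming a typical data layout), {{i32}} and [1 x i32] are
/// both stripped down to i32, while {i32, i8} is returned unchanged because
/// stripping the struct would change the allocated size.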
3375 static Type *stripAggregateTypeWrapping(const DataLayout &DL, Type *Ty) {
3376 if (Ty->isSingleValueType())
3377 return Ty;
3378
3379 uint64_t AllocSize = DL.getTypeAllocSize(Ty);
3380 uint64_t TypeSize = DL.getTypeSizeInBits(Ty);
3381
3382 Type *InnerTy;
3383 if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) {
3384 InnerTy = ArrTy->getElementType();
3385 } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
3386 const StructLayout *SL = DL.getStructLayout(STy);
3387 unsigned Index = SL->getElementContainingOffset(0);
3388 InnerTy = STy->getElementType(Index);
3389 } else {
3390 return Ty;
3391 }
3392
3393 if (AllocSize > DL.getTypeAllocSize(InnerTy) ||
3394 TypeSize > DL.getTypeSizeInBits(InnerTy))
3395 return Ty;
3396
3397 return stripAggregateTypeWrapping(DL, InnerTy);
3398 }
3399
3400 /// Try to find a partition of the aggregate type passed in for a given
3401 /// offset and size.
3402 ///
3403 /// This recurses through the aggregate type and tries to compute a subtype
3404 /// based on the offset and size. When the offset and size span a sub-section
3405 /// of an array, it will even compute a new array type for that sub-section,
3406 /// and the same for structs.
3407 ///
3408 /// Note that this routine is very strict and tries to find a partition of the
3409 /// type which produces the *exact* right offset and size. It is not forgiving
3410 /// when the size or offset causes either end of a type-based partition to be off.
3411 /// Also, this is a best-effort routine. It is reasonable to give up and not
3412 /// return a type if necessary.
3413 static Type *getTypePartition(const DataLayout &DL, Type *Ty, uint64_t Offset,
3414 uint64_t Size) {
3415 if (Offset == 0 && DL.getTypeAllocSize(Ty) == Size)
3416 return stripAggregateTypeWrapping(DL, Ty);
3417 if (Offset > DL.getTypeAllocSize(Ty) ||
3418 (DL.getTypeAllocSize(Ty) - Offset) < Size)
3419 return nullptr;
3420
3421 if (SequentialType *SeqTy = dyn_cast<SequentialType>(Ty)) {
3422 Type *ElementTy = SeqTy->getElementType();
3423 uint64_t ElementSize = DL.getTypeAllocSize(ElementTy);
3424 uint64_t NumSkippedElements = Offset / ElementSize;
3425 if (NumSkippedElements >= SeqTy->getNumElements())
3426 return nullptr;
3427 Offset -= NumSkippedElements * ElementSize;
3428
3429 // First check if we need to recurse.
3430 if (Offset > 0 || Size < ElementSize) {
3431 // Bail if the partition ends in a different array element.
3432 if ((Offset + Size) > ElementSize)
3433 return nullptr;
3434 // Recurse through the element type trying to peel off offset bytes.
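// For example (assuming a typical data layout), partitioning
// [2 x {i32, i32}] with Offset == 4 and Size == 4 recurses into the first
// struct element and ultimately yields the i32 at byte offset 4.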
3435 return getTypePartition(DL, ElementTy, Offset, Size); 3436 } 3437 assert(Offset == 0); 3438 3439 if (Size == ElementSize) 3440 return stripAggregateTypeWrapping(DL, ElementTy); 3441 assert(Size > ElementSize); 3442 uint64_t NumElements = Size / ElementSize; 3443 if (NumElements * ElementSize != Size) 3444 return nullptr; 3445 return ArrayType::get(ElementTy, NumElements); 3446 } 3447 3448 StructType *STy = dyn_cast<StructType>(Ty); 3449 if (!STy) 3450 return nullptr; 3451 3452 const StructLayout *SL = DL.getStructLayout(STy); 3453 if (Offset >= SL->getSizeInBytes()) 3454 return nullptr; 3455 uint64_t EndOffset = Offset + Size; 3456 if (EndOffset > SL->getSizeInBytes()) 3457 return nullptr; 3458 3459 unsigned Index = SL->getElementContainingOffset(Offset); 3460 Offset -= SL->getElementOffset(Index); 3461 3462 Type *ElementTy = STy->getElementType(Index); 3463 uint64_t ElementSize = DL.getTypeAllocSize(ElementTy); 3464 if (Offset >= ElementSize) 3465 return nullptr; // The offset points into alignment padding. 3466 3467 // See if any partition must be contained by the element. 3468 if (Offset > 0 || Size < ElementSize) { 3469 if ((Offset + Size) > ElementSize) 3470 return nullptr; 3471 return getTypePartition(DL, ElementTy, Offset, Size); 3472 } 3473 assert(Offset == 0); 3474 3475 if (Size == ElementSize) 3476 return stripAggregateTypeWrapping(DL, ElementTy); 3477 3478 StructType::element_iterator EI = STy->element_begin() + Index, 3479 EE = STy->element_end(); 3480 if (EndOffset < SL->getSizeInBytes()) { 3481 unsigned EndIndex = SL->getElementContainingOffset(EndOffset); 3482 if (Index == EndIndex) 3483 return nullptr; // Within a single element and its padding. 3484 3485 // Don't try to form "natural" types if the elements don't line up with the 3486 // expected size. 3487 // FIXME: We could potentially recurse down through the last element in the 3488 // sub-struct to find a natural end point. 3489 if (SL->getElementOffset(EndIndex) != EndOffset) 3490 return nullptr; 3491 3492 assert(Index < EndIndex); 3493 EE = STy->element_begin() + EndIndex; 3494 } 3495 3496 // Try to build up a sub-structure. 3497 StructType *SubTy = 3498 StructType::get(STy->getContext(), makeArrayRef(EI, EE), STy->isPacked()); 3499 const StructLayout *SubSL = DL.getStructLayout(SubTy); 3500 if (Size != SubSL->getSizeInBytes()) 3501 return nullptr; // The sub-struct doesn't have quite the size needed. 3502 3503 return SubTy; 3504 } 3505 3506 /// Pre-split loads and stores to simplify rewriting. 3507 /// 3508 /// We want to break up the splittable load+store pairs as much as 3509 /// possible. This is important to do as a preprocessing step, as once we 3510 /// start rewriting the accesses to partitions of the alloca we lose the 3511 /// necessary information to correctly split apart paired loads and stores 3512 /// which both point into this alloca. 
The case to consider is something like
3513 /// the following:
3514 ///
3515 /// %a = alloca [12 x i8]
3516 /// %gep1 = getelementptr [12 x i8]* %a, i32 0, i32 0
3517 /// %gep2 = getelementptr [12 x i8]* %a, i32 0, i32 4
3518 /// %gep3 = getelementptr [12 x i8]* %a, i32 0, i32 8
3519 /// %iptr1 = bitcast i8* %gep1 to i64*
3520 /// %iptr2 = bitcast i8* %gep2 to i64*
3521 /// %fptr1 = bitcast i8* %gep1 to float*
3522 /// %fptr2 = bitcast i8* %gep2 to float*
3523 /// %fptr3 = bitcast i8* %gep3 to float*
3524 /// store float 0.0, float* %fptr1
3525 /// store float 1.0, float* %fptr2
3526 /// %v = load i64* %iptr1
3527 /// store i64 %v, i64* %iptr2
3528 /// %f1 = load float* %fptr2
3529 /// %f2 = load float* %fptr3
3530 ///
3531 /// Here we want to form 3 partitions of the alloca, each 4 bytes large, and
3532 /// promote everything so we recover the 2 SSA values that should have been
3533 /// there all along.
3534 ///
3535 /// \returns true if any changes are made.
3536 bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
3537 LLVM_DEBUG(dbgs() << "Pre-splitting loads and stores\n");
3538
3539 // Track the loads and stores which are candidates for pre-splitting here, in
3540 // the order they first appear during the partition scan. These give stable
3541 // iteration order and a basis for tracking which loads and stores we
3542 // actually split.
3543 SmallVector<LoadInst *, 4> Loads;
3544 SmallVector<StoreInst *, 4> Stores;
3545
3546 // We need to accumulate the splits required of each load or store where we
3547 // can find them via a direct lookup. This is important to cross-check loads
3548 // and stores against each other. We also track the slice so that we can kill
3549 // all the slices that end up split.
3550 struct SplitOffsets {
3551 Slice *S;
3552 std::vector<uint64_t> Splits;
3553 };
3554 SmallDenseMap<Instruction *, SplitOffsets, 8> SplitOffsetsMap;
3555
3556 // Track loads out of this alloca which cannot, for any reason, be pre-split.
3557 // This is important as we also cannot pre-split stores of those loads!
3558 // FIXME: This is all pretty gross. It means that we can be more aggressive
3559 // in pre-splitting when the load feeding the store happens to come from
3560 // a separate alloca. Put another way, the effectiveness of SROA would be
3561 // decreased by a frontend which just concatenated all of its local allocas
3562 // into one big flat alloca. But defeating such patterns is exactly the job
3563 // SROA is tasked with! Sadly, to not have this discrepancy we would have to
3564 // change store pre-splitting to actually force pre-splitting of the load
3565 // that feeds it *and all stores*. That makes pre-splitting much harder, but
3566 // maybe it would make it more principled?
3567 SmallPtrSet<LoadInst *, 8> UnsplittableLoads;
3568
3569 LLVM_DEBUG(dbgs() << " Searching for candidate loads and stores\n");
3570 for (auto &P : AS.partitions()) {
3571 for (Slice &S : P) {
3572 Instruction *I = cast<Instruction>(S.getUse()->getUser());
3573 if (!S.isSplittable() || S.endOffset() <= P.endOffset()) {
3574 // If this is a load we have to track that it can't participate in any
3575 // pre-splitting. If this is a store of a load we have to track that
3576 // that load also can't participate in any pre-splitting.
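// (Slices reach this branch either because they were marked unsplittable,
// e.g. volatile accesses, or because they end at or before this partition's
// end and so need no pre-splitting.)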
3577 if (auto *LI = dyn_cast<LoadInst>(I)) 3578 UnsplittableLoads.insert(LI); 3579 else if (auto *SI = dyn_cast<StoreInst>(I)) 3580 if (auto *LI = dyn_cast<LoadInst>(SI->getValueOperand())) 3581 UnsplittableLoads.insert(LI); 3582 continue; 3583 } 3584 assert(P.endOffset() > S.beginOffset() && 3585 "Empty or backwards partition!"); 3586 3587 // Determine if this is a pre-splittable slice. 3588 if (auto *LI = dyn_cast<LoadInst>(I)) { 3589 assert(!LI->isVolatile() && "Cannot split volatile loads!"); 3590 3591 // The load must be used exclusively to store into other pointers for 3592 // us to be able to arbitrarily pre-split it. The stores must also be 3593 // simple to avoid changing semantics. 3594 auto IsLoadSimplyStored = [](LoadInst *LI) { 3595 for (User *LU : LI->users()) { 3596 auto *SI = dyn_cast<StoreInst>(LU); 3597 if (!SI || !SI->isSimple()) 3598 return false; 3599 } 3600 return true; 3601 }; 3602 if (!IsLoadSimplyStored(LI)) { 3603 UnsplittableLoads.insert(LI); 3604 continue; 3605 } 3606 3607 Loads.push_back(LI); 3608 } else if (auto *SI = dyn_cast<StoreInst>(I)) { 3609 if (S.getUse() != &SI->getOperandUse(SI->getPointerOperandIndex())) 3610 // Skip stores *of* pointers. FIXME: This shouldn't even be possible! 3611 continue; 3612 auto *StoredLoad = dyn_cast<LoadInst>(SI->getValueOperand()); 3613 if (!StoredLoad || !StoredLoad->isSimple()) 3614 continue; 3615 assert(!SI->isVolatile() && "Cannot split volatile stores!"); 3616 3617 Stores.push_back(SI); 3618 } else { 3619 // Other uses cannot be pre-split. 3620 continue; 3621 } 3622 3623 // Record the initial split. 3624 LLVM_DEBUG(dbgs() << " Candidate: " << *I << "\n"); 3625 auto &Offsets = SplitOffsetsMap[I]; 3626 assert(Offsets.Splits.empty() && 3627 "Should not have splits the first time we see an instruction!"); 3628 Offsets.S = &S; 3629 Offsets.Splits.push_back(P.endOffset() - S.beginOffset()); 3630 } 3631 3632 // Now scan the already split slices, and add a split for any of them which 3633 // we're going to pre-split. 3634 for (Slice *S : P.splitSliceTails()) { 3635 auto SplitOffsetsMapI = 3636 SplitOffsetsMap.find(cast<Instruction>(S->getUse()->getUser())); 3637 if (SplitOffsetsMapI == SplitOffsetsMap.end()) 3638 continue; 3639 auto &Offsets = SplitOffsetsMapI->second; 3640 3641 assert(Offsets.S == S && "Found a mismatched slice!"); 3642 assert(!Offsets.Splits.empty() && 3643 "Cannot have an empty set of splits on the second partition!"); 3644 assert(Offsets.Splits.back() == 3645 P.beginOffset() - Offsets.S->beginOffset() && 3646 "Previous split does not end where this one begins!"); 3647 3648 // Record each split. The last partition's end isn't needed as the size 3649 // of the slice dictates that. 3650 if (S->endOffset() > P.endOffset()) 3651 Offsets.Splits.push_back(P.endOffset() - Offsets.S->beginOffset()); 3652 } 3653 } 3654 3655 // We may have split loads where some of their stores are split stores. For 3656 // such loads and stores, we can only pre-split them if their splits exactly 3657 // match relative to their starting offset. We have to verify this prior to 3658 // any rewriting. 3659 Stores.erase( 3660 llvm::remove_if(Stores, 3661 [&UnsplittableLoads, &SplitOffsetsMap](StoreInst *SI) { 3662 // Lookup the load we are storing in our map of split 3663 // offsets. 3664 auto *LI = cast<LoadInst>(SI->getValueOperand()); 3665 // If it was completely unsplittable, then we're done, 3666 // and this store can't be pre-split. 
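// For example (hypothetical offsets): a load pre-split at relative offsets
// {4} feeding a store pre-split at {2, 6} cannot be rewritten coherently,
// so the mismatch check below abandons both instructions as candidates.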
3667 if (UnsplittableLoads.count(LI))
3668 return true;
3669
3670 auto LoadOffsetsI = SplitOffsetsMap.find(LI);
3671 if (LoadOffsetsI == SplitOffsetsMap.end())
3672 return false; // Unrelated loads are definitely safe.
3673 auto &LoadOffsets = LoadOffsetsI->second;
3674
3675 // Now lookup the store's offsets.
3676 auto &StoreOffsets = SplitOffsetsMap[SI];
3677
3678 // If the relative offsets of each split in the load and
3679 // store match exactly, then we can split them and we
3680 // don't need to remove them here.
3681 if (LoadOffsets.Splits == StoreOffsets.Splits)
3682 return false;
3683
3684 LLVM_DEBUG(
3685 dbgs()
3686 << " Mismatched splits for load and store:\n"
3687 << " " << *LI << "\n"
3688 << " " << *SI << "\n");
3689
3690 // We've found a store and load that we need to split
3691 // with mismatched relative splits. Just give up on them
3692 // and remove both instructions from our list of
3693 // candidates.
3694 UnsplittableLoads.insert(LI);
3695 return true;
3696 }),
3697 Stores.end());
3698 // Now we have to go *back* through all the stores, because a later store may
3699 // have caused an earlier store's load to become unsplittable, and if it is
3700 // unsplittable for the later store, then we can't rely on it being split in
3701 // the earlier store either.
3702 Stores.erase(llvm::remove_if(Stores,
3703 [&UnsplittableLoads](StoreInst *SI) {
3704 auto *LI =
3705 cast<LoadInst>(SI->getValueOperand());
3706 return UnsplittableLoads.count(LI);
3707 }),
3708 Stores.end());
3709 // Once we've established all the loads that can't be split for some reason,
3710 // filter out any that made it into our list.
3711 Loads.erase(llvm::remove_if(Loads,
3712 [&UnsplittableLoads](LoadInst *LI) {
3713 return UnsplittableLoads.count(LI);
3714 }),
3715 Loads.end());
3716
3717 // If no loads or stores are left, there is no pre-splitting to be done for
3718 // this alloca.
3719 if (Loads.empty() && Stores.empty())
3720 return false;
3721
3722 // From here on, we can't fail and will be building new accesses, so rig up
3723 // an IR builder.
3724 IRBuilderTy IRB(&AI);
3725
3726 // Collect the new slices which we will merge into the alloca slices.
3727 SmallVector<Slice, 4> NewSlices;
3728
3729 // Track any allocas we end up splitting loads and stores for so we iterate
3730 // on them.
3731 SmallPtrSet<AllocaInst *, 4> ResplitPromotableAllocas;
3732
3733 // At this point, we have collected all of the loads and stores we can
3734 // pre-split, and the specific splits needed for them. We actually do the
3735 // splitting in a specific order in order to handle when one of the loads is
3736 // the value operand to one of the stores.
3737 //
3738 // First, we rewrite all of the split loads, and just accumulate each split
3739 // load in a parallel structure. We also build the slices for them and append
3740 // them to the alloca slices.
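// Illustrative IR (hypothetical names): a single split at relative offset 4
// turns
//   %v = load i64, i64* %iptr
// into two narrower loads of adjusted pointers:
//   %v.sroa.0 = load i32, i32* %iptr.0
//   %v.sroa.1 = load i32, i32* %iptr.1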
3741 SmallDenseMap<LoadInst *, std::vector<LoadInst *>, 1> SplitLoadsMap; 3742 std::vector<LoadInst *> SplitLoads; 3743 const DataLayout &DL = AI.getModule()->getDataLayout(); 3744 for (LoadInst *LI : Loads) { 3745 SplitLoads.clear(); 3746 3747 IntegerType *Ty = cast<IntegerType>(LI->getType()); 3748 uint64_t LoadSize = Ty->getBitWidth() / 8; 3749 assert(LoadSize > 0 && "Cannot have a zero-sized integer load!"); 3750 3751 auto &Offsets = SplitOffsetsMap[LI]; 3752 assert(LoadSize == Offsets.S->endOffset() - Offsets.S->beginOffset() && 3753 "Slice size should always match load size exactly!"); 3754 uint64_t BaseOffset = Offsets.S->beginOffset(); 3755 assert(BaseOffset + LoadSize > BaseOffset && 3756 "Cannot represent alloca access size using 64-bit integers!"); 3757 3758 Instruction *BasePtr = cast<Instruction>(LI->getPointerOperand()); 3759 IRB.SetInsertPoint(LI); 3760 3761 LLVM_DEBUG(dbgs() << " Splitting load: " << *LI << "\n"); 3762 3763 uint64_t PartOffset = 0, PartSize = Offsets.Splits.front(); 3764 int Idx = 0, Size = Offsets.Splits.size(); 3765 for (;;) { 3766 auto *PartTy = Type::getIntNTy(Ty->getContext(), PartSize * 8); 3767 auto AS = LI->getPointerAddressSpace(); 3768 auto *PartPtrTy = PartTy->getPointerTo(AS); 3769 LoadInst *PLoad = IRB.CreateAlignedLoad( 3770 getAdjustedPtr(IRB, DL, BasePtr, 3771 APInt(DL.getIndexSizeInBits(AS), PartOffset), 3772 PartPtrTy, BasePtr->getName() + "."), 3773 getAdjustedAlignment(LI, PartOffset, DL), /*IsVolatile*/ false, 3774 LI->getName()); 3775 PLoad->copyMetadata(*LI, LLVMContext::MD_mem_parallel_loop_access); 3776 3777 // Append this load onto the list of split loads so we can find it later 3778 // to rewrite the stores. 3779 SplitLoads.push_back(PLoad); 3780 3781 // Now build a new slice for the alloca. 3782 NewSlices.push_back( 3783 Slice(BaseOffset + PartOffset, BaseOffset + PartOffset + PartSize, 3784 &PLoad->getOperandUse(PLoad->getPointerOperandIndex()), 3785 /*IsSplittable*/ false)); 3786 LLVM_DEBUG(dbgs() << " new slice [" << NewSlices.back().beginOffset() 3787 << ", " << NewSlices.back().endOffset() 3788 << "): " << *PLoad << "\n"); 3789 3790 // See if we've handled all the splits. 3791 if (Idx >= Size) 3792 break; 3793 3794 // Setup the next partition. 3795 PartOffset = Offsets.Splits[Idx]; 3796 ++Idx; 3797 PartSize = (Idx < Size ? Offsets.Splits[Idx] : LoadSize) - PartOffset; 3798 } 3799 3800 // Now that we have the split loads, do the slow walk over all uses of the 3801 // load and rewrite them as split stores, or save the split loads to use 3802 // below if the store is going to be split there anyways. 3803 bool DeferredStores = false; 3804 for (User *LU : LI->users()) { 3805 StoreInst *SI = cast<StoreInst>(LU); 3806 if (!Stores.empty() && SplitOffsetsMap.count(SI)) { 3807 DeferredStores = true; 3808 LLVM_DEBUG(dbgs() << " Deferred splitting of store: " << *SI 3809 << "\n"); 3810 continue; 3811 } 3812 3813 Value *StoreBasePtr = SI->getPointerOperand(); 3814 IRB.SetInsertPoint(SI); 3815 3816 LLVM_DEBUG(dbgs() << " Splitting store of load: " << *SI << "\n"); 3817 3818 for (int Idx = 0, Size = SplitLoads.size(); Idx < Size; ++Idx) { 3819 LoadInst *PLoad = SplitLoads[Idx]; 3820 uint64_t PartOffset = Idx == 0 ? 
0 : Offsets.Splits[Idx - 1]; 3821 auto *PartPtrTy = 3822 PLoad->getType()->getPointerTo(SI->getPointerAddressSpace()); 3823 3824 auto AS = SI->getPointerAddressSpace(); 3825 StoreInst *PStore = IRB.CreateAlignedStore( 3826 PLoad, 3827 getAdjustedPtr(IRB, DL, StoreBasePtr, 3828 APInt(DL.getIndexSizeInBits(AS), PartOffset), 3829 PartPtrTy, StoreBasePtr->getName() + "."), 3830 getAdjustedAlignment(SI, PartOffset, DL), /*IsVolatile*/ false); 3831 PStore->copyMetadata(*LI, LLVMContext::MD_mem_parallel_loop_access); 3832 LLVM_DEBUG(dbgs() << " +" << PartOffset << ":" << *PStore << "\n"); 3833 } 3834 3835 // We want to immediately iterate on any allocas impacted by splitting 3836 // this store, and we have to track any promotable alloca (indicated by 3837 // a direct store) as needing to be resplit because it is no longer 3838 // promotable. 3839 if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(StoreBasePtr)) { 3840 ResplitPromotableAllocas.insert(OtherAI); 3841 Worklist.insert(OtherAI); 3842 } else if (AllocaInst *OtherAI = dyn_cast<AllocaInst>( 3843 StoreBasePtr->stripInBoundsOffsets())) { 3844 Worklist.insert(OtherAI); 3845 } 3846 3847 // Mark the original store as dead. 3848 DeadInsts.insert(SI); 3849 } 3850 3851 // Save the split loads if there are deferred stores among the users. 3852 if (DeferredStores) 3853 SplitLoadsMap.insert(std::make_pair(LI, std::move(SplitLoads))); 3854 3855 // Mark the original load as dead and kill the original slice. 3856 DeadInsts.insert(LI); 3857 Offsets.S->kill(); 3858 } 3859 3860 // Second, we rewrite all of the split stores. At this point, we know that 3861 // all loads from this alloca have been split already. For stores of such 3862 // loads, we can simply look up the pre-existing split loads. For stores of 3863 // other loads, we split those loads first and then write split stores of 3864 // them. 3865 for (StoreInst *SI : Stores) { 3866 auto *LI = cast<LoadInst>(SI->getValueOperand()); 3867 IntegerType *Ty = cast<IntegerType>(LI->getType()); 3868 uint64_t StoreSize = Ty->getBitWidth() / 8; 3869 assert(StoreSize > 0 && "Cannot have a zero-sized integer store!"); 3870 3871 auto &Offsets = SplitOffsetsMap[SI]; 3872 assert(StoreSize == Offsets.S->endOffset() - Offsets.S->beginOffset() && 3873 "Slice size should always match load size exactly!"); 3874 uint64_t BaseOffset = Offsets.S->beginOffset(); 3875 assert(BaseOffset + StoreSize > BaseOffset && 3876 "Cannot represent alloca access size using 64-bit integers!"); 3877 3878 Value *LoadBasePtr = LI->getPointerOperand(); 3879 Instruction *StoreBasePtr = cast<Instruction>(SI->getPointerOperand()); 3880 3881 LLVM_DEBUG(dbgs() << " Splitting store: " << *SI << "\n"); 3882 3883 // Check whether we have an already split load. 
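// (If this store was deferred while its load was split above, the split
// loads were stashed in SplitLoadsMap; otherwise the load points at some
// other base pointer and we must split it ourselves below.)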
3884 auto SplitLoadsMapI = SplitLoadsMap.find(LI); 3885 std::vector<LoadInst *> *SplitLoads = nullptr; 3886 if (SplitLoadsMapI != SplitLoadsMap.end()) { 3887 SplitLoads = &SplitLoadsMapI->second; 3888 assert(SplitLoads->size() == Offsets.Splits.size() + 1 && 3889 "Too few split loads for the number of splits in the store!"); 3890 } else { 3891 LLVM_DEBUG(dbgs() << " of load: " << *LI << "\n"); 3892 } 3893 3894 uint64_t PartOffset = 0, PartSize = Offsets.Splits.front(); 3895 int Idx = 0, Size = Offsets.Splits.size(); 3896 for (;;) { 3897 auto *PartTy = Type::getIntNTy(Ty->getContext(), PartSize * 8); 3898 auto *LoadPartPtrTy = PartTy->getPointerTo(LI->getPointerAddressSpace()); 3899 auto *StorePartPtrTy = PartTy->getPointerTo(SI->getPointerAddressSpace()); 3900 3901 // Either lookup a split load or create one. 3902 LoadInst *PLoad; 3903 if (SplitLoads) { 3904 PLoad = (*SplitLoads)[Idx]; 3905 } else { 3906 IRB.SetInsertPoint(LI); 3907 auto AS = LI->getPointerAddressSpace(); 3908 PLoad = IRB.CreateAlignedLoad( 3909 getAdjustedPtr(IRB, DL, LoadBasePtr, 3910 APInt(DL.getIndexSizeInBits(AS), PartOffset), 3911 LoadPartPtrTy, LoadBasePtr->getName() + "."), 3912 getAdjustedAlignment(LI, PartOffset, DL), /*IsVolatile*/ false, 3913 LI->getName()); 3914 } 3915 3916 // And store this partition. 3917 IRB.SetInsertPoint(SI); 3918 auto AS = SI->getPointerAddressSpace(); 3919 StoreInst *PStore = IRB.CreateAlignedStore( 3920 PLoad, 3921 getAdjustedPtr(IRB, DL, StoreBasePtr, 3922 APInt(DL.getIndexSizeInBits(AS), PartOffset), 3923 StorePartPtrTy, StoreBasePtr->getName() + "."), 3924 getAdjustedAlignment(SI, PartOffset, DL), /*IsVolatile*/ false); 3925 3926 // Now build a new slice for the alloca. 3927 NewSlices.push_back( 3928 Slice(BaseOffset + PartOffset, BaseOffset + PartOffset + PartSize, 3929 &PStore->getOperandUse(PStore->getPointerOperandIndex()), 3930 /*IsSplittable*/ false)); 3931 LLVM_DEBUG(dbgs() << " new slice [" << NewSlices.back().beginOffset() 3932 << ", " << NewSlices.back().endOffset() 3933 << "): " << *PStore << "\n"); 3934 if (!SplitLoads) { 3935 LLVM_DEBUG(dbgs() << " of split load: " << *PLoad << "\n"); 3936 } 3937 3938 // See if we've finished all the splits. 3939 if (Idx >= Size) 3940 break; 3941 3942 // Setup the next partition. 3943 PartOffset = Offsets.Splits[Idx]; 3944 ++Idx; 3945 PartSize = (Idx < Size ? Offsets.Splits[Idx] : StoreSize) - PartOffset; 3946 } 3947 3948 // We want to immediately iterate on any allocas impacted by splitting 3949 // this load, which is only relevant if it isn't a load of this alloca and 3950 // thus we didn't already split the loads above. We also have to keep track 3951 // of any promotable allocas we split loads on as they can no longer be 3952 // promoted. 3953 if (!SplitLoads) { 3954 if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(LoadBasePtr)) { 3955 assert(OtherAI != &AI && "We can't re-split our own alloca!"); 3956 ResplitPromotableAllocas.insert(OtherAI); 3957 Worklist.insert(OtherAI); 3958 } else if (AllocaInst *OtherAI = dyn_cast<AllocaInst>( 3959 LoadBasePtr->stripInBoundsOffsets())) { 3960 assert(OtherAI != &AI && "We can't re-split our own alloca!"); 3961 Worklist.insert(OtherAI); 3962 } 3963 } 3964 3965 // Mark the original store as dead now that we've split it up and kill its 3966 // slice. Note that we leave the original load in place unless this store 3967 // was its only use. It may in turn be split up if it is an alloca load 3968 // for some other alloca, but it may be a normal load. 
This may introduce
3969 // redundant loads, but where those can be merged the rest of the optimizer
3970 // should handle the merging, and this uncovers SSA splits, which is more
3971 // important. In practice, the original loads will almost always be fully
3972 // split and removed eventually, and the splits will be merged by any
3973 // trivial CSE, including instcombine.
3974 if (LI->hasOneUse()) {
3975 assert(*LI->user_begin() == SI && "Single use isn't this store!");
3976 DeadInsts.insert(LI);
3977 }
3978 DeadInsts.insert(SI);
3979 Offsets.S->kill();
3980 }
3981
3982 // Remove the killed slices that have been pre-split.
3983 AS.erase(llvm::remove_if(AS, [](const Slice &S) { return S.isDead(); }),
3984 AS.end());
3985
3986 // Insert our new slices. This will sort and merge them into the sorted
3987 // sequence.
3988 AS.insert(NewSlices);
3989
3990 LLVM_DEBUG(dbgs() << " Pre-split slices:\n");
3991 #ifndef NDEBUG
3992 for (auto I = AS.begin(), E = AS.end(); I != E; ++I)
3993 LLVM_DEBUG(AS.print(dbgs(), I, " "));
3994 #endif
3995
3996 // Finally, don't try to promote any allocas that now require re-splitting.
3997 // They have already been added to the worklist above.
3998 PromotableAllocas.erase(
3999 llvm::remove_if(
4000 PromotableAllocas,
4001 [&](AllocaInst *AI) { return ResplitPromotableAllocas.count(AI); }),
4002 PromotableAllocas.end());
4003
4004 return true;
4005 }
4006
4007 /// Rewrite an alloca partition's users.
4008 ///
4009 /// This routine drives both of the rewriting goals of the SROA pass. It tries
4010 /// to rewrite uses of an alloca partition to be conducive to SSA value
4011 /// promotion. If the partition needs a new, more refined alloca, this will
4012 /// build that new alloca, preserving as much type information as possible, and
4013 /// rewrite the uses of the old alloca to point at the new one and have the
4014 /// appropriate new offsets. It also evaluates how successful the rewrite was
4015 /// at enabling promotion and, if it was successful, queues the alloca to be
4016 /// promoted.
4017 AllocaInst *SROA::rewritePartition(AllocaInst &AI, AllocaSlices &AS,
4018 Partition &P) {
4019 // Try to compute a friendly type for this partition of the alloca. This
4020 // won't always succeed, in which case we fall back to a legal integer type
4021 // or an i8 array of an appropriate size.
4022 Type *SliceTy = nullptr;
4023 const DataLayout &DL = AI.getModule()->getDataLayout();
4024 if (Type *CommonUseTy = findCommonType(P.begin(), P.end(), P.endOffset()))
4025 if (DL.getTypeAllocSize(CommonUseTy) >= P.size())
4026 SliceTy = CommonUseTy;
4027 if (!SliceTy)
4028 if (Type *TypePartitionTy = getTypePartition(DL, AI.getAllocatedType(),
4029 P.beginOffset(), P.size()))
4030 SliceTy = TypePartitionTy;
4031 if ((!SliceTy || (SliceTy->isArrayTy() &&
4032 SliceTy->getArrayElementType()->isIntegerTy())) &&
4033 DL.isLegalInteger(P.size() * 8))
4034 SliceTy = Type::getIntNTy(*C, P.size() * 8);
4035 if (!SliceTy)
4036 SliceTy = ArrayType::get(Type::getInt8Ty(*C), P.size());
4037 assert(DL.getTypeAllocSize(SliceTy) >= P.size());
4038
4039 bool IsIntegerPromotable = isIntegerWideningViable(P, SliceTy, DL);
4040
4041 VectorType *VecTy =
4042 IsIntegerPromotable ? nullptr : isVectorPromotionViable(P, DL);
4043 if (VecTy)
4044 SliceTy = VecTy;
4045
4046 // Check for the case where we're going to rewrite to a new alloca of the
4047 // exact same type as the original, and with the same access offsets. In that
4048 // case, re-use the existing alloca, but still run through the rewriter to
4049 // perform phi and select speculation.
4050 // P.beginOffset() can be non-zero even with the same type in a case with
4051 // out-of-bounds access (e.g. @PR35657 function in SROA/basictest.ll).
4052 AllocaInst *NewAI;
4053 if (SliceTy == AI.getAllocatedType() && P.beginOffset() == 0) {
4054 NewAI = &AI;
4055 // FIXME: We should be able to bail at this point with "nothing changed".
4056 // FIXME: We might want to defer PHI speculation until after here.
4057 // FIXME: return nullptr;
4058 } else {
4059 unsigned Alignment = AI.getAlignment();
4060 if (!Alignment) {
4061 // The minimum alignment which users can rely on when the explicit
4062 // alignment is omitted or zero is that required by the ABI for this
4063 // type.
4064 Alignment = DL.getABITypeAlignment(AI.getAllocatedType());
4065 }
4066 Alignment = MinAlign(Alignment, P.beginOffset());
4067 // If we will get at least this much alignment from the type alone, leave
4068 // the alloca's alignment unconstrained.
4069 if (Alignment <= DL.getABITypeAlignment(SliceTy))
4070 Alignment = 0;
4071 NewAI = new AllocaInst(
4072 SliceTy, AI.getType()->getAddressSpace(), nullptr, Alignment,
4073 AI.getName() + ".sroa." + Twine(P.begin() - AS.begin()), &AI);
4074 // Copy the old AI debug location over to the new one.
4075 NewAI->setDebugLoc(AI.getDebugLoc());
4076 ++NumNewAllocas;
4077 }
4078
4079 LLVM_DEBUG(dbgs() << "Rewriting alloca partition "
4080 << "[" << P.beginOffset() << "," << P.endOffset()
4081 << ") to: " << *NewAI << "\n");
4082
4083 // Track the high watermark on the worklist as it is only relevant for
4084 // promoted allocas. We will reset it to this point if the alloca is not in
4085 // fact scheduled for promotion.
4086 unsigned PPWOldSize = PostPromotionWorklist.size();
4087 unsigned NumUses = 0;
4088 SmallSetVector<PHINode *, 8> PHIUsers;
4089 SmallSetVector<SelectInst *, 8> SelectUsers;
4090
4091 AllocaSliceRewriter Rewriter(DL, AS, *this, AI, *NewAI, P.beginOffset(),
4092 P.endOffset(), IsIntegerPromotable, VecTy,
4093 PHIUsers, SelectUsers);
4094 bool Promotable = true;
4095 for (Slice *S : P.splitSliceTails()) {
4096 Promotable &= Rewriter.visit(S);
4097 ++NumUses;
4098 }
4099 for (Slice &S : P) {
4100 Promotable &= Rewriter.visit(&S);
4101 ++NumUses;
4102 }
4103
4104 NumAllocaPartitionUses += NumUses;
4105 MaxUsesPerAllocaPartition.updateMax(NumUses);
4106
4107 // Now that we've processed all the slices in the new partition, check if any
4108 // PHIs or Selects would block promotion.
4109 for (PHINode *PHI : PHIUsers)
4110 if (!isSafePHIToSpeculate(*PHI)) {
4111 Promotable = false;
4112 PHIUsers.clear();
4113 SelectUsers.clear();
4114 break;
4115 }
4116
4117 for (SelectInst *Sel : SelectUsers)
4118 if (!isSafeSelectToSpeculate(*Sel)) {
4119 Promotable = false;
4120 PHIUsers.clear();
4121 SelectUsers.clear();
4122 break;
4123 }
4124
4125 if (Promotable) {
4126 if (PHIUsers.empty() && SelectUsers.empty()) {
4127 // Promote the alloca.
4128 PromotableAllocas.push_back(NewAI);
4129 } else {
4130 // If we have either PHIs or Selects to speculate, add them to those
4131 // worklists and re-queue the new alloca so that we promote it on the
4132 // next iteration.
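// Speculation itself is deferred (see speculatePHINodeLoads and
// speculateSelectInstLoads) so that it observes the fully-rewritten
// alloca; the re-queued alloca is then promoted on a later iteration.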
4133 for (PHINode *PHIUser : PHIUsers)
4134 SpeculatablePHIs.insert(PHIUser);
4135 for (SelectInst *SelectUser : SelectUsers)
4136 SpeculatableSelects.insert(SelectUser);
4137 Worklist.insert(NewAI);
4138 }
4139 } else {
4140 // Drop any post-promotion work items if promotion didn't happen.
4141 while (PostPromotionWorklist.size() > PPWOldSize)
4142 PostPromotionWorklist.pop_back();
4143
4144 // We couldn't promote and we didn't create a new partition; nothing
4145 // happened.
4146 if (NewAI == &AI)
4147 return nullptr;
4148
4149 // If we can't promote the alloca, iterate on it to check for new
4150 // refinements exposed by splitting the current alloca. Don't iterate on an
4151 // alloca which didn't actually change and didn't get promoted.
4152 Worklist.insert(NewAI);
4153 }
4154
4155 return NewAI;
4156 }
4157
4158 /// Walks the slices of an alloca and forms partitions based on them,
4159 /// rewriting each of their uses.
4160 bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &AS) {
4161 if (AS.begin() == AS.end())
4162 return false;
4163
4164 unsigned NumPartitions = 0;
4165 bool Changed = false;
4166 const DataLayout &DL = AI.getModule()->getDataLayout();
4167
4168 // First try to pre-split loads and stores.
4169 Changed |= presplitLoadsAndStores(AI, AS);
4170
4171 // Now that we have identified any pre-splitting opportunities,
4172 // mark loads and stores unsplittable except for the following case.
4173 // We leave a slice splittable if all other slices are disjoint or fully
4174 // included in the slice, such as whole-alloca loads and stores.
4175 // If we fail to split these during pre-splitting, we want to force them
4176 // to be rewritten into a partition.
4177 bool IsSorted = true;
4178
4179 uint64_t AllocaSize = DL.getTypeAllocSize(AI.getAllocatedType());
4180 const uint64_t MaxBitVectorSize = 1024;
4181 if (AllocaSize <= MaxBitVectorSize) {
4182 // If a byte boundary is included in any load or store, a slice starting or
4183 // ending at the boundary is not splittable.
4184 SmallBitVector SplittableOffset(AllocaSize + 1, true);
4185 for (Slice &S : AS)
4186 for (unsigned O = S.beginOffset() + 1;
4187 O < S.endOffset() && O < AllocaSize; O++)
4188 SplittableOffset.reset(O);
4189
4190 for (Slice &S : AS) {
4191 if (!S.isSplittable())
4192 continue;
4193
4194 if ((S.beginOffset() > AllocaSize || SplittableOffset[S.beginOffset()]) &&
4195 (S.endOffset() > AllocaSize || SplittableOffset[S.endOffset()]))
4196 continue;
4197
4198 if (isa<LoadInst>(S.getUse()->getUser()) ||
4199 isa<StoreInst>(S.getUse()->getUser())) {
4200 S.makeUnsplittable();
4201 IsSorted = false;
4202 }
4203 }
4204 }
4205 else {
4206 // We only allow whole-alloca splittable loads and stores
4207 // for a large alloca to avoid creating a too-large BitVector.
4208 for (Slice &S : AS) {
4209 if (!S.isSplittable())
4210 continue;
4211
4212 if (S.beginOffset() == 0 && S.endOffset() >= AllocaSize)
4213 continue;
4214
4215 if (isa<LoadInst>(S.getUse()->getUser()) ||
4216 isa<StoreInst>(S.getUse()->getUser())) {
4217 S.makeUnsplittable();
4218 IsSorted = false;
4219 }
4220 }
4221 }
4222
4223 if (!IsSorted)
4224 llvm::sort(AS);
4225
4226 /// Describes the allocas introduced by rewritePartition in order to migrate
4227 /// the debug info.
4228 struct Fragment {
4229 AllocaInst *Alloca;
4230 uint64_t Offset;
4231 uint64_t Size;
4232 Fragment(AllocaInst *AI, uint64_t O, uint64_t S)
4233 : Alloca(AI), Offset(O), Size(S) {}
4234 };
4235 SmallVector<Fragment, 4> Fragments;
4236
4237 // Rewrite each partition.
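// For example (hypothetical layout), an alloca of {i64, float} whose two
// fields are accessed independently would typically form two partitions,
// [0, 8) and [8, 12), each rewritten onto its own new alloca.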
4238 for (auto &P : AS.partitions()) { 4239 if (AllocaInst *NewAI = rewritePartition(AI, AS, P)) { 4240 Changed = true; 4241 if (NewAI != &AI) { 4242 uint64_t SizeOfByte = 8; 4243 uint64_t AllocaSize = DL.getTypeSizeInBits(NewAI->getAllocatedType()); 4244 // Don't include any padding. 4245 uint64_t Size = std::min(AllocaSize, P.size() * SizeOfByte); 4246 Fragments.push_back(Fragment(NewAI, P.beginOffset() * SizeOfByte, Size)); 4247 } 4248 } 4249 ++NumPartitions; 4250 } 4251 4252 NumAllocaPartitions += NumPartitions; 4253 MaxPartitionsPerAlloca.updateMax(NumPartitions); 4254 4255 // Migrate debug information from the old alloca to the new alloca(s) 4256 // and the individual partitions. 4257 TinyPtrVector<DbgVariableIntrinsic *> DbgDeclares = FindDbgAddrUses(&AI); 4258 if (!DbgDeclares.empty()) { 4259 auto *Var = DbgDeclares.front()->getVariable(); 4260 auto *Expr = DbgDeclares.front()->getExpression(); 4261 auto VarSize = Var->getSizeInBits(); 4262 DIBuilder DIB(*AI.getModule(), /*AllowUnresolved*/ false); 4263 uint64_t AllocaSize = DL.getTypeSizeInBits(AI.getAllocatedType()); 4264 for (auto Fragment : Fragments) { 4265 // Create a fragment expression describing the new partition or reuse AI's 4266 // expression if there is only one partition. 4267 auto *FragmentExpr = Expr; 4268 if (Fragment.Size < AllocaSize || Expr->isFragment()) { 4269 // If this alloca is already a scalar replacement of a larger aggregate, 4270 // Fragment.Offset describes the offset inside the scalar. 4271 auto ExprFragment = Expr->getFragmentInfo(); 4272 uint64_t Offset = ExprFragment ? ExprFragment->OffsetInBits : 0; 4273 uint64_t Start = Offset + Fragment.Offset; 4274 uint64_t Size = Fragment.Size; 4275 if (ExprFragment) { 4276 uint64_t AbsEnd = 4277 ExprFragment->OffsetInBits + ExprFragment->SizeInBits; 4278 if (Start >= AbsEnd) 4279 // No need to describe a SROAed padding. 4280 continue; 4281 Size = std::min(Size, AbsEnd - Start); 4282 } 4283 // The new, smaller fragment is stenciled out from the old fragment. 4284 if (auto OrigFragment = FragmentExpr->getFragmentInfo()) { 4285 assert(Start >= OrigFragment->OffsetInBits && 4286 "new fragment is outside of original fragment"); 4287 Start -= OrigFragment->OffsetInBits; 4288 } 4289 4290 // The alloca may be larger than the variable. 4291 if (VarSize) { 4292 if (Size > *VarSize) 4293 Size = *VarSize; 4294 if (Size == 0 || Start + Size > *VarSize) 4295 continue; 4296 } 4297 4298 // Avoid creating a fragment expression that covers the entire variable. 4299 if (!VarSize || *VarSize != Size) { 4300 if (auto E = 4301 DIExpression::createFragmentExpression(Expr, Start, Size)) 4302 FragmentExpr = *E; 4303 else 4304 continue; 4305 } 4306 } 4307 4308 // Remove any existing intrinsics describing the same alloca. 4309 for (DbgVariableIntrinsic *OldDII : FindDbgAddrUses(Fragment.Alloca)) 4310 OldDII->eraseFromParent(); 4311 4312 DIB.insertDeclare(Fragment.Alloca, Var, FragmentExpr, 4313 DbgDeclares.front()->getDebugLoc(), &AI); 4314 } 4315 } 4316 return Changed; 4317 } 4318 4319 /// Clobber a use with undef, deleting the used value if it becomes dead. 4320 void SROA::clobberUse(Use &U) { 4321 Value *OldV = U; 4322 // Replace the use with an undef value. 4323 U = UndefValue::get(OldV->getType()); 4324 4325 // Check for this making an instruction dead. We have to garbage collect 4326 // all the dead instructions to ensure the uses of any alloca end up being 4327 // minimal. 
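// For example, if the clobbered use was the sole use of a GEP, replacing it
// with undef leaves that GEP trivially dead, and it is queued for deletion
// as well.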
4328 if (Instruction *OldI = dyn_cast<Instruction>(OldV)) 4329 if (isInstructionTriviallyDead(OldI)) { 4330 DeadInsts.insert(OldI); 4331 } 4332 } 4333 4334 /// Analyze an alloca for SROA. 4335 /// 4336 /// This analyzes the alloca to ensure we can reason about it, builds 4337 /// the slices of the alloca, and then hands it off to be split and 4338 /// rewritten as needed. 4339 bool SROA::runOnAlloca(AllocaInst &AI) { 4340 LLVM_DEBUG(dbgs() << "SROA alloca: " << AI << "\n"); 4341 ++NumAllocasAnalyzed; 4342 4343 // Special case dead allocas, as they're trivial. 4344 if (AI.use_empty()) { 4345 AI.eraseFromParent(); 4346 return true; 4347 } 4348 const DataLayout &DL = AI.getModule()->getDataLayout(); 4349 4350 // Skip alloca forms that this analysis can't handle. 4351 if (AI.isArrayAllocation() || !AI.getAllocatedType()->isSized() || 4352 DL.getTypeAllocSize(AI.getAllocatedType()) == 0) 4353 return false; 4354 4355 bool Changed = false; 4356 4357 // First, split any FCA loads and stores touching this alloca to promote 4358 // better splitting and promotion opportunities. 4359 AggLoadStoreRewriter AggRewriter; 4360 Changed |= AggRewriter.rewrite(AI); 4361 4362 // Build the slices using a recursive instruction-visiting builder. 4363 AllocaSlices AS(DL, AI); 4364 LLVM_DEBUG(AS.print(dbgs())); 4365 if (AS.isEscaped()) 4366 return Changed; 4367 4368 // Delete all the dead users of this alloca before splitting and rewriting it. 4369 for (Instruction *DeadUser : AS.getDeadUsers()) { 4370 // Free up everything used by this instruction. 4371 for (Use &DeadOp : DeadUser->operands()) 4372 clobberUse(DeadOp); 4373 4374 // Now replace the uses of this instruction. 4375 DeadUser->replaceAllUsesWith(UndefValue::get(DeadUser->getType())); 4376 4377 // And mark it for deletion. 4378 DeadInsts.insert(DeadUser); 4379 Changed = true; 4380 } 4381 for (Use *DeadOp : AS.getDeadOperands()) { 4382 clobberUse(*DeadOp); 4383 Changed = true; 4384 } 4385 4386 // No slices to split. Leave the dead alloca for a later pass to clean up. 4387 if (AS.begin() == AS.end()) 4388 return Changed; 4389 4390 Changed |= splitAlloca(AI, AS); 4391 4392 LLVM_DEBUG(dbgs() << " Speculating PHIs\n"); 4393 while (!SpeculatablePHIs.empty()) 4394 speculatePHINodeLoads(*SpeculatablePHIs.pop_back_val()); 4395 4396 LLVM_DEBUG(dbgs() << " Speculating Selects\n"); 4397 while (!SpeculatableSelects.empty()) 4398 speculateSelectInstLoads(*SpeculatableSelects.pop_back_val()); 4399 4400 return Changed; 4401 } 4402 4403 /// Delete the dead instructions accumulated in this run. 4404 /// 4405 /// Recursively deletes the dead instructions we've accumulated. This is done 4406 /// at the very end to maximize locality of the recursive delete and to 4407 /// minimize the problems of invalidated instruction pointers as such pointers 4408 /// are used heavily in the intermediate stages of the algorithm. 4409 /// 4410 /// We also record the alloca instructions deleted here so that they aren't 4411 /// subsequently handed to mem2reg to promote. 4412 bool SROA::deleteDeadInstructions( 4413 SmallPtrSetImpl<AllocaInst *> &DeletedAllocas) { 4414 bool Changed = false; 4415 while (!DeadInsts.empty()) { 4416 Instruction *I = DeadInsts.pop_back_val(); 4417 LLVM_DEBUG(dbgs() << "Deleting dead instruction: " << *I << "\n"); 4418 4419 // If the instruction is an alloca, find the possible dbg.declare connected 4420 // to it, and remove it too. We must do this before calling RAUW or we will 4421 // not be able to find it. 
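// (FindDbgAddrUses returns the dbg.declare and dbg.addr intrinsics that
// describe an alloca; erasing them here keeps the debug info consistent
// once the alloca itself is deleted.)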
4422 if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) { 4423 DeletedAllocas.insert(AI); 4424 for (DbgVariableIntrinsic *OldDII : FindDbgAddrUses(AI)) 4425 OldDII->eraseFromParent(); 4426 } 4427 4428 I->replaceAllUsesWith(UndefValue::get(I->getType())); 4429 4430 for (Use &Operand : I->operands()) 4431 if (Instruction *U = dyn_cast<Instruction>(Operand)) { 4432 // Zero out the operand and see if it becomes trivially dead. 4433 Operand = nullptr; 4434 if (isInstructionTriviallyDead(U)) 4435 DeadInsts.insert(U); 4436 } 4437 4438 ++NumDeleted; 4439 I->eraseFromParent(); 4440 Changed = true; 4441 } 4442 return Changed; 4443 } 4444 4445 /// Promote the allocas, using the best available technique. 4446 /// 4447 /// This attempts to promote whatever allocas have been identified as viable in 4448 /// the PromotableAllocas list. If that list is empty, there is nothing to do. 4449 /// This function returns whether any promotion occurred. 4450 bool SROA::promoteAllocas(Function &F) { 4451 if (PromotableAllocas.empty()) 4452 return false; 4453 4454 NumPromoted += PromotableAllocas.size(); 4455 4456 LLVM_DEBUG(dbgs() << "Promoting allocas with mem2reg...\n"); 4457 PromoteMemToReg(PromotableAllocas, *DT, AC); 4458 PromotableAllocas.clear(); 4459 return true; 4460 } 4461 4462 PreservedAnalyses SROA::runImpl(Function &F, DominatorTree &RunDT, 4463 AssumptionCache &RunAC) { 4464 LLVM_DEBUG(dbgs() << "SROA function: " << F.getName() << "\n"); 4465 C = &F.getContext(); 4466 DT = &RunDT; 4467 AC = &RunAC; 4468 4469 BasicBlock &EntryBB = F.getEntryBlock(); 4470 for (BasicBlock::iterator I = EntryBB.begin(), E = std::prev(EntryBB.end()); 4471 I != E; ++I) { 4472 if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) 4473 Worklist.insert(AI); 4474 } 4475 4476 bool Changed = false; 4477 // A set of deleted alloca instruction pointers which should be removed from 4478 // the list of promotable allocas. 4479 SmallPtrSet<AllocaInst *, 4> DeletedAllocas; 4480 4481 do { 4482 while (!Worklist.empty()) { 4483 Changed |= runOnAlloca(*Worklist.pop_back_val()); 4484 Changed |= deleteDeadInstructions(DeletedAllocas); 4485 4486 // Remove the deleted allocas from various lists so that we don't try to 4487 // continue processing them. 4488 if (!DeletedAllocas.empty()) { 4489 auto IsInSet = [&](AllocaInst *AI) { return DeletedAllocas.count(AI); }; 4490 Worklist.remove_if(IsInSet); 4491 PostPromotionWorklist.remove_if(IsInSet); 4492 PromotableAllocas.erase(llvm::remove_if(PromotableAllocas, IsInSet), 4493 PromotableAllocas.end()); 4494 DeletedAllocas.clear(); 4495 } 4496 } 4497 4498 Changed |= promoteAllocas(F); 4499 4500 Worklist = PostPromotionWorklist; 4501 PostPromotionWorklist.clear(); 4502 } while (!Worklist.empty()); 4503 4504 if (!Changed) 4505 return PreservedAnalyses::all(); 4506 4507 PreservedAnalyses PA; 4508 PA.preserveSet<CFGAnalyses>(); 4509 PA.preserve<GlobalsAA>(); 4510 return PA; 4511 } 4512 4513 PreservedAnalyses SROA::run(Function &F, FunctionAnalysisManager &AM) { 4514 return runImpl(F, AM.getResult<DominatorTreeAnalysis>(F), 4515 AM.getResult<AssumptionAnalysis>(F)); 4516 } 4517 4518 /// A legacy pass for the legacy pass manager that wraps the \c SROA pass. 4519 /// 4520 /// This is in the llvm namespace purely to allow it to be a friend of the \c 4521 /// SROA pass. 4522 class llvm::sroa::SROALegacyPass : public FunctionPass { 4523 /// The SROA implementation. 
4524 SROA Impl; 4525 4526 public: 4527 static char ID; 4528 4529 SROALegacyPass() : FunctionPass(ID) { 4530 initializeSROALegacyPassPass(*PassRegistry::getPassRegistry()); 4531 } 4532 4533 bool runOnFunction(Function &F) override { 4534 if (skipFunction(F)) 4535 return false; 4536 4537 auto PA = Impl.runImpl( 4538 F, getAnalysis<DominatorTreeWrapperPass>().getDomTree(), 4539 getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F)); 4540 return !PA.areAllPreserved(); 4541 } 4542 4543 void getAnalysisUsage(AnalysisUsage &AU) const override { 4544 AU.addRequired<AssumptionCacheTracker>(); 4545 AU.addRequired<DominatorTreeWrapperPass>(); 4546 AU.addPreserved<GlobalsAAWrapperPass>(); 4547 AU.setPreservesCFG(); 4548 } 4549 4550 StringRef getPassName() const override { return "SROA"; } 4551 }; 4552 4553 char SROALegacyPass::ID = 0; 4554 4555 FunctionPass *llvm::createSROAPass() { return new SROALegacyPass(); } 4556 4557 INITIALIZE_PASS_BEGIN(SROALegacyPass, "sroa", 4558 "Scalar Replacement Of Aggregates", false, false) 4559 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 4560 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 4561 INITIALIZE_PASS_END(SROALegacyPass, "sroa", "Scalar Replacement Of Aggregates", 4562 false, false) 4563