1 //===- SROA.cpp - Scalar Replacement Of Aggregates ------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 /// \file 9 /// This transformation implements the well known scalar replacement of 10 /// aggregates transformation. It tries to identify promotable elements of an 11 /// aggregate alloca, and promote them to registers. It will also try to 12 /// convert uses of an element (or set of elements) of an alloca into a vector 13 /// or bitfield-style integer scalar if appropriate. 14 /// 15 /// It works to do this with minimal slicing of the alloca so that regions 16 /// which are merely transferred in and out of external memory remain unchanged 17 /// and are not decomposed to scalar code. 18 /// 19 /// Because this also performs alloca promotion, it can be thought of as also 20 /// serving the purpose of SSA formation. The algorithm iterates on the 21 /// function until all opportunities for promotion have been realized. 22 /// 23 //===----------------------------------------------------------------------===// 24 25 #include "llvm/Transforms/Scalar/SROA.h" 26 #include "llvm/ADT/APInt.h" 27 #include "llvm/ADT/ArrayRef.h" 28 #include "llvm/ADT/DenseMap.h" 29 #include "llvm/ADT/PointerIntPair.h" 30 #include "llvm/ADT/STLExtras.h" 31 #include "llvm/ADT/SetVector.h" 32 #include "llvm/ADT/SmallBitVector.h" 33 #include "llvm/ADT/SmallPtrSet.h" 34 #include "llvm/ADT/SmallVector.h" 35 #include "llvm/ADT/Statistic.h" 36 #include "llvm/ADT/StringRef.h" 37 #include "llvm/ADT/Twine.h" 38 #include "llvm/ADT/iterator.h" 39 #include "llvm/ADT/iterator_range.h" 40 #include "llvm/Analysis/AssumptionCache.h" 41 #include "llvm/Analysis/GlobalsModRef.h" 42 #include "llvm/Analysis/Loads.h" 43 #include "llvm/Analysis/PtrUseVisitor.h" 44 #include "llvm/Config/llvm-config.h" 45 #include "llvm/IR/BasicBlock.h" 46 #include "llvm/IR/Constant.h" 47 #include "llvm/IR/ConstantFolder.h" 48 #include "llvm/IR/Constants.h" 49 #include "llvm/IR/DIBuilder.h" 50 #include "llvm/IR/DataLayout.h" 51 #include "llvm/IR/DebugInfo.h" 52 #include "llvm/IR/DebugInfoMetadata.h" 53 #include "llvm/IR/DerivedTypes.h" 54 #include "llvm/IR/Dominators.h" 55 #include "llvm/IR/Function.h" 56 #include "llvm/IR/GetElementPtrTypeIterator.h" 57 #include "llvm/IR/GlobalAlias.h" 58 #include "llvm/IR/IRBuilder.h" 59 #include "llvm/IR/InstVisitor.h" 60 #include "llvm/IR/Instruction.h" 61 #include "llvm/IR/Instructions.h" 62 #include "llvm/IR/IntrinsicInst.h" 63 #include "llvm/IR/LLVMContext.h" 64 #include "llvm/IR/Metadata.h" 65 #include "llvm/IR/Module.h" 66 #include "llvm/IR/Operator.h" 67 #include "llvm/IR/PassManager.h" 68 #include "llvm/IR/Type.h" 69 #include "llvm/IR/Use.h" 70 #include "llvm/IR/User.h" 71 #include "llvm/IR/Value.h" 72 #include "llvm/InitializePasses.h" 73 #include "llvm/Pass.h" 74 #include "llvm/Support/Casting.h" 75 #include "llvm/Support/CommandLine.h" 76 #include "llvm/Support/Compiler.h" 77 #include "llvm/Support/Debug.h" 78 #include "llvm/Support/ErrorHandling.h" 79 #include "llvm/Support/raw_ostream.h" 80 #include "llvm/Transforms/Scalar.h" 81 #include "llvm/Transforms/Utils/Local.h" 82 #include "llvm/Transforms/Utils/PromoteMemToReg.h" 83 #include <algorithm> 84 #include <cassert> 85 #include <cstddef> 86 #include <cstdint> 87 #include <cstring> 88 
#include <iterator> 89 #include <string> 90 #include <tuple> 91 #include <utility> 92 #include <vector> 93 94 using namespace llvm; 95 using namespace llvm::sroa; 96 97 #define DEBUG_TYPE "sroa" 98 99 STATISTIC(NumAllocasAnalyzed, "Number of allocas analyzed for replacement"); 100 STATISTIC(NumAllocaPartitions, "Number of alloca partitions formed"); 101 STATISTIC(MaxPartitionsPerAlloca, "Maximum number of partitions per alloca"); 102 STATISTIC(NumAllocaPartitionUses, "Number of alloca partition uses rewritten"); 103 STATISTIC(MaxUsesPerAllocaPartition, "Maximum number of uses of a partition"); 104 STATISTIC(NumNewAllocas, "Number of new, smaller allocas introduced"); 105 STATISTIC(NumPromoted, "Number of allocas promoted to SSA values"); 106 STATISTIC(NumLoadsSpeculated, "Number of loads speculated to allow promotion"); 107 STATISTIC(NumDeleted, "Number of instructions deleted"); 108 STATISTIC(NumVectorized, "Number of vectorized aggregates"); 109 110 /// Hidden option to experiment with completely strict handling of inbounds 111 /// GEPs. 112 static cl::opt<bool> SROAStrictInbounds("sroa-strict-inbounds", cl::init(false), 113 cl::Hidden); 114 115 namespace { 116 117 /// A custom IRBuilder inserter which prefixes all names, but only in 118 /// Assert builds. 119 class IRBuilderPrefixedInserter final : public IRBuilderDefaultInserter { 120 std::string Prefix; 121 122 Twine getNameWithPrefix(const Twine &Name) const { 123 return Name.isTriviallyEmpty() ? Name : Prefix + Name; 124 } 125 126 public: 127 void SetNamePrefix(const Twine &P) { Prefix = P.str(); } 128 129 void InsertHelper(Instruction *I, const Twine &Name, BasicBlock *BB, 130 BasicBlock::iterator InsertPt) const override { 131 IRBuilderDefaultInserter::InsertHelper(I, getNameWithPrefix(Name), BB, 132 InsertPt); 133 } 134 }; 135 136 /// Provide a type for IRBuilder that drops names in release builds. 137 using IRBuilderTy = IRBuilder<ConstantFolder, IRBuilderPrefixedInserter>; 138 139 /// A used slice of an alloca. 140 /// 141 /// This structure represents a slice of an alloca used by some instruction. It 142 /// stores both the begin and end offsets of this use, a pointer to the use 143 /// itself, and a flag indicating whether we can classify the use as splittable 144 /// or not when forming partitions of the alloca. 145 class Slice { 146 /// The beginning offset of the range. 147 uint64_t BeginOffset = 0; 148 149 /// The ending offset, not included in the range. 150 uint64_t EndOffset = 0; 151 152 /// Storage for both the use of this slice and whether it can be 153 /// split. 154 PointerIntPair<Use *, 1, bool> UseAndIsSplittable; 155 156 public: 157 Slice() = default; 158 159 Slice(uint64_t BeginOffset, uint64_t EndOffset, Use *U, bool IsSplittable) 160 : BeginOffset(BeginOffset), EndOffset(EndOffset), 161 UseAndIsSplittable(U, IsSplittable) {} 162 163 uint64_t beginOffset() const { return BeginOffset; } 164 uint64_t endOffset() const { return EndOffset; } 165 166 bool isSplittable() const { return UseAndIsSplittable.getInt(); } 167 void makeUnsplittable() { UseAndIsSplittable.setInt(false); } 168 169 Use *getUse() const { return UseAndIsSplittable.getPointer(); } 170 171 bool isDead() const { return getUse() == nullptr; } 172 void kill() { UseAndIsSplittable.setPointer(nullptr); } 173 174 /// Support for ordering ranges. 175 /// 176 /// This provides an ordering over ranges such that start offsets are 177 /// always increasing, and within equal start offsets, the end offsets are 178 /// decreasing. 
Thus the spanning range comes first in a cluster with the 179 /// same start position. 180 bool operator<(const Slice &RHS) const { 181 if (beginOffset() < RHS.beginOffset()) 182 return true; 183 if (beginOffset() > RHS.beginOffset()) 184 return false; 185 if (isSplittable() != RHS.isSplittable()) 186 return !isSplittable(); 187 if (endOffset() > RHS.endOffset()) 188 return true; 189 return false; 190 } 191 192 /// Support comparison with a single offset to allow binary searches. 193 friend LLVM_ATTRIBUTE_UNUSED bool operator<(const Slice &LHS, 194 uint64_t RHSOffset) { 195 return LHS.beginOffset() < RHSOffset; 196 } 197 friend LLVM_ATTRIBUTE_UNUSED bool operator<(uint64_t LHSOffset, 198 const Slice &RHS) { 199 return LHSOffset < RHS.beginOffset(); 200 } 201 202 bool operator==(const Slice &RHS) const { 203 return isSplittable() == RHS.isSplittable() && 204 beginOffset() == RHS.beginOffset() && endOffset() == RHS.endOffset(); 205 } 206 bool operator!=(const Slice &RHS) const { return !operator==(RHS); } 207 }; 208 209 } // end anonymous namespace 210 211 /// Representation of the alloca slices. 212 /// 213 /// This class represents the slices of an alloca which are formed by its 214 /// various uses. If a pointer escapes, we can't fully build a representation 215 /// for the slices used and we reflect that in this structure. The uses are 216 /// stored, sorted by increasing beginning offset and with unsplittable slices 217 /// starting at a particular offset before splittable slices. 218 class llvm::sroa::AllocaSlices { 219 public: 220 /// Construct the slices of a particular alloca. 221 AllocaSlices(const DataLayout &DL, AllocaInst &AI); 222 223 /// Test whether a pointer to the allocation escapes our analysis. 224 /// 225 /// If this is true, the slices are never fully built and should be 226 /// ignored. 227 bool isEscaped() const { return PointerEscapingInstr; } 228 229 /// Support for iterating over the slices. 230 /// @{ 231 using iterator = SmallVectorImpl<Slice>::iterator; 232 using range = iterator_range<iterator>; 233 234 iterator begin() { return Slices.begin(); } 235 iterator end() { return Slices.end(); } 236 237 using const_iterator = SmallVectorImpl<Slice>::const_iterator; 238 using const_range = iterator_range<const_iterator>; 239 240 const_iterator begin() const { return Slices.begin(); } 241 const_iterator end() const { return Slices.end(); } 242 /// @} 243 244 /// Erase a range of slices. 245 void erase(iterator Start, iterator Stop) { Slices.erase(Start, Stop); } 246 247 /// Insert new slices for this alloca. 248 /// 249 /// This moves the slices into the alloca's slices collection, and re-sorts 250 /// everything so that the usual ordering properties of the alloca's slices 251 /// hold. 252 void insert(ArrayRef<Slice> NewSlices) { 253 int OldSize = Slices.size(); 254 Slices.append(NewSlices.begin(), NewSlices.end()); 255 auto SliceI = Slices.begin() + OldSize; 256 llvm::sort(SliceI, Slices.end()); 257 std::inplace_merge(Slices.begin(), SliceI, Slices.end()); 258 } 259 260 // Forward declare the iterator and range accessor for walking the 261 // partitions. 262 class partition_iterator; 263 iterator_range<partition_iterator> partitions(); 264 265 /// Access the dead users for this alloca. 266 ArrayRef<Instruction *> getDeadUsers() const { return DeadUsers; } 267 268 /// Access Uses that should be dropped if the alloca is promotable. 
269 ArrayRef<Use *> getDeadUsesIfPromotable() const { 270 return DeadUseIfPromotable; 271 } 272 273 /// Access the dead operands referring to this alloca. 274 /// 275 /// These are operands which have cannot actually be used to refer to the 276 /// alloca as they are outside its range and the user doesn't correct for 277 /// that. These mostly consist of PHI node inputs and the like which we just 278 /// need to replace with undef. 279 ArrayRef<Use *> getDeadOperands() const { return DeadOperands; } 280 281 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 282 void print(raw_ostream &OS, const_iterator I, StringRef Indent = " ") const; 283 void printSlice(raw_ostream &OS, const_iterator I, 284 StringRef Indent = " ") const; 285 void printUse(raw_ostream &OS, const_iterator I, 286 StringRef Indent = " ") const; 287 void print(raw_ostream &OS) const; 288 void dump(const_iterator I) const; 289 void dump() const; 290 #endif 291 292 private: 293 template <typename DerivedT, typename RetT = void> class BuilderBase; 294 class SliceBuilder; 295 296 friend class AllocaSlices::SliceBuilder; 297 298 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 299 /// Handle to alloca instruction to simplify method interfaces. 300 AllocaInst &AI; 301 #endif 302 303 /// The instruction responsible for this alloca not having a known set 304 /// of slices. 305 /// 306 /// When an instruction (potentially) escapes the pointer to the alloca, we 307 /// store a pointer to that here and abort trying to form slices of the 308 /// alloca. This will be null if the alloca slices are analyzed successfully. 309 Instruction *PointerEscapingInstr; 310 311 /// The slices of the alloca. 312 /// 313 /// We store a vector of the slices formed by uses of the alloca here. This 314 /// vector is sorted by increasing begin offset, and then the unsplittable 315 /// slices before the splittable ones. See the Slice inner class for more 316 /// details. 317 SmallVector<Slice, 8> Slices; 318 319 /// Instructions which will become dead if we rewrite the alloca. 320 /// 321 /// Note that these are not separated by slice. This is because we expect an 322 /// alloca to be completely rewritten or not rewritten at all. If rewritten, 323 /// all these instructions can simply be removed and replaced with poison as 324 /// they come from outside of the allocated space. 325 SmallVector<Instruction *, 8> DeadUsers; 326 327 /// Uses which will become dead if can promote the alloca. 328 SmallVector<Use *, 8> DeadUseIfPromotable; 329 330 /// Operands which will become dead if we rewrite the alloca. 331 /// 332 /// These are operands that in their particular use can be replaced with 333 /// poison when we rewrite the alloca. These show up in out-of-bounds inputs 334 /// to PHI nodes and the like. They aren't entirely dead (there might be 335 /// a GEP back into the bounds using it elsewhere) and nor is the PHI, but we 336 /// want to swap this particular input for poison to simplify the use lists of 337 /// the alloca. 338 SmallVector<Use *, 8> DeadOperands; 339 }; 340 341 /// A partition of the slices. 342 /// 343 /// An ephemeral representation for a range of slices which can be viewed as 344 /// a partition of the alloca. This range represents a span of the alloca's 345 /// memory which cannot be split, and provides access to all of the slices 346 /// overlapping some part of the partition. 347 /// 348 /// Objects of this type are produced by traversing the alloca's slices, but 349 /// are only ephemeral and not persistent. 
350 class llvm::sroa::Partition { 351 private: 352 friend class AllocaSlices; 353 friend class AllocaSlices::partition_iterator; 354 355 using iterator = AllocaSlices::iterator; 356 357 /// The beginning and ending offsets of the alloca for this 358 /// partition. 359 uint64_t BeginOffset = 0, EndOffset = 0; 360 361 /// The start and end iterators of this partition. 362 iterator SI, SJ; 363 364 /// A collection of split slice tails overlapping the partition. 365 SmallVector<Slice *, 4> SplitTails; 366 367 /// Raw constructor builds an empty partition starting and ending at 368 /// the given iterator. 369 Partition(iterator SI) : SI(SI), SJ(SI) {} 370 371 public: 372 /// The start offset of this partition. 373 /// 374 /// All of the contained slices start at or after this offset. 375 uint64_t beginOffset() const { return BeginOffset; } 376 377 /// The end offset of this partition. 378 /// 379 /// All of the contained slices end at or before this offset. 380 uint64_t endOffset() const { return EndOffset; } 381 382 /// The size of the partition. 383 /// 384 /// Note that this can never be zero. 385 uint64_t size() const { 386 assert(BeginOffset < EndOffset && "Partitions must span some bytes!"); 387 return EndOffset - BeginOffset; 388 } 389 390 /// Test whether this partition contains no slices, and merely spans 391 /// a region occupied by split slices. 392 bool empty() const { return SI == SJ; } 393 394 /// \name Iterate slices that start within the partition. 395 /// These may be splittable or unsplittable. They have a begin offset >= the 396 /// partition begin offset. 397 /// @{ 398 // FIXME: We should probably define a "concat_iterator" helper and use that 399 // to stitch together pointee_iterators over the split tails and the 400 // contiguous iterators of the partition. That would give a much nicer 401 // interface here. We could then additionally expose filtered iterators for 402 // split, unsplit, and unsplittable splices based on the usage patterns. 403 iterator begin() const { return SI; } 404 iterator end() const { return SJ; } 405 /// @} 406 407 /// Get the sequence of split slice tails. 408 /// 409 /// These tails are of slices which start before this partition but are 410 /// split and overlap into the partition. We accumulate these while forming 411 /// partitions. 412 ArrayRef<Slice *> splitSliceTails() const { return SplitTails; } 413 }; 414 415 /// An iterator over partitions of the alloca's slices. 416 /// 417 /// This iterator implements the core algorithm for partitioning the alloca's 418 /// slices. It is a forward iterator as we don't support backtracking for 419 /// efficiency reasons, and re-use a single storage area to maintain the 420 /// current set of split slices. 421 /// 422 /// It is templated on the slice iterator type to use so that it can operate 423 /// with either const or non-const slice iterators. 424 class AllocaSlices::partition_iterator 425 : public iterator_facade_base<partition_iterator, std::forward_iterator_tag, 426 Partition> { 427 friend class AllocaSlices; 428 429 /// Most of the state for walking the partitions is held in a class 430 /// with a nice interface for examining them. 431 Partition P; 432 433 /// We need to keep the end of the slices to know when to stop. 434 AllocaSlices::iterator SE; 435 436 /// We also need to keep track of the maximum split end offset seen. 437 /// FIXME: Do we really? 438 uint64_t MaxSplitSliceEndOffset = 0; 439 440 /// Sets the partition to be empty at given iterator, and sets the 441 /// end iterator. 
442 partition_iterator(AllocaSlices::iterator SI, AllocaSlices::iterator SE) 443 : P(SI), SE(SE) { 444 // If not already at the end, advance our state to form the initial 445 // partition. 446 if (SI != SE) 447 advance(); 448 } 449 450 /// Advance the iterator to the next partition. 451 /// 452 /// Requires that the iterator not be at the end of the slices. 453 void advance() { 454 assert((P.SI != SE || !P.SplitTails.empty()) && 455 "Cannot advance past the end of the slices!"); 456 457 // Clear out any split uses which have ended. 458 if (!P.SplitTails.empty()) { 459 if (P.EndOffset >= MaxSplitSliceEndOffset) { 460 // If we've finished all splits, this is easy. 461 P.SplitTails.clear(); 462 MaxSplitSliceEndOffset = 0; 463 } else { 464 // Remove the uses which have ended in the prior partition. This 465 // cannot change the max split slice end because we just checked that 466 // the prior partition ended prior to that max. 467 llvm::erase_if(P.SplitTails, 468 [&](Slice *S) { return S->endOffset() <= P.EndOffset; }); 469 assert(llvm::any_of(P.SplitTails, 470 [&](Slice *S) { 471 return S->endOffset() == MaxSplitSliceEndOffset; 472 }) && 473 "Could not find the current max split slice offset!"); 474 assert(llvm::all_of(P.SplitTails, 475 [&](Slice *S) { 476 return S->endOffset() <= MaxSplitSliceEndOffset; 477 }) && 478 "Max split slice end offset is not actually the max!"); 479 } 480 } 481 482 // If P.SI is already at the end, then we've cleared the split tail and 483 // now have an end iterator. 484 if (P.SI == SE) { 485 assert(P.SplitTails.empty() && "Failed to clear the split slices!"); 486 return; 487 } 488 489 // If we had a non-empty partition previously, set up the state for 490 // subsequent partitions. 491 if (P.SI != P.SJ) { 492 // Accumulate all the splittable slices which started in the old 493 // partition into the split list. 494 for (Slice &S : P) 495 if (S.isSplittable() && S.endOffset() > P.EndOffset) { 496 P.SplitTails.push_back(&S); 497 MaxSplitSliceEndOffset = 498 std::max(S.endOffset(), MaxSplitSliceEndOffset); 499 } 500 501 // Start from the end of the previous partition. 502 P.SI = P.SJ; 503 504 // If P.SI is now at the end, we at most have a tail of split slices. 505 if (P.SI == SE) { 506 P.BeginOffset = P.EndOffset; 507 P.EndOffset = MaxSplitSliceEndOffset; 508 return; 509 } 510 511 // If the we have split slices and the next slice is after a gap and is 512 // not splittable immediately form an empty partition for the split 513 // slices up until the next slice begins. 514 if (!P.SplitTails.empty() && P.SI->beginOffset() != P.EndOffset && 515 !P.SI->isSplittable()) { 516 P.BeginOffset = P.EndOffset; 517 P.EndOffset = P.SI->beginOffset(); 518 return; 519 } 520 } 521 522 // OK, we need to consume new slices. Set the end offset based on the 523 // current slice, and step SJ past it. The beginning offset of the 524 // partition is the beginning offset of the next slice unless we have 525 // pre-existing split slices that are continuing, in which case we begin 526 // at the prior end offset. 527 P.BeginOffset = P.SplitTails.empty() ? P.SI->beginOffset() : P.EndOffset; 528 P.EndOffset = P.SI->endOffset(); 529 ++P.SJ; 530 531 // There are two strategies to form a partition based on whether the 532 // partition starts with an unsplittable slice or a splittable slice. 533 if (!P.SI->isSplittable()) { 534 // When we're forming an unsplittable region, it must always start at 535 // the first slice and will extend through its end. 
536 assert(P.BeginOffset == P.SI->beginOffset()); 537 538 // Form a partition including all of the overlapping slices with this 539 // unsplittable slice. 540 while (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset) { 541 if (!P.SJ->isSplittable()) 542 P.EndOffset = std::max(P.EndOffset, P.SJ->endOffset()); 543 ++P.SJ; 544 } 545 546 // We have a partition across a set of overlapping unsplittable 547 // partitions. 548 return; 549 } 550 551 // If we're starting with a splittable slice, then we need to form 552 // a synthetic partition spanning it and any other overlapping splittable 553 // splices. 554 assert(P.SI->isSplittable() && "Forming a splittable partition!"); 555 556 // Collect all of the overlapping splittable slices. 557 while (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset && 558 P.SJ->isSplittable()) { 559 P.EndOffset = std::max(P.EndOffset, P.SJ->endOffset()); 560 ++P.SJ; 561 } 562 563 // Back upiP.EndOffset if we ended the span early when encountering an 564 // unsplittable slice. This synthesizes the early end offset of 565 // a partition spanning only splittable slices. 566 if (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset) { 567 assert(!P.SJ->isSplittable()); 568 P.EndOffset = P.SJ->beginOffset(); 569 } 570 } 571 572 public: 573 bool operator==(const partition_iterator &RHS) const { 574 assert(SE == RHS.SE && 575 "End iterators don't match between compared partition iterators!"); 576 577 // The observed positions of partitions is marked by the P.SI iterator and 578 // the emptiness of the split slices. The latter is only relevant when 579 // P.SI == SE, as the end iterator will additionally have an empty split 580 // slices list, but the prior may have the same P.SI and a tail of split 581 // slices. 582 if (P.SI == RHS.P.SI && P.SplitTails.empty() == RHS.P.SplitTails.empty()) { 583 assert(P.SJ == RHS.P.SJ && 584 "Same set of slices formed two different sized partitions!"); 585 assert(P.SplitTails.size() == RHS.P.SplitTails.size() && 586 "Same slice position with differently sized non-empty split " 587 "slice tails!"); 588 return true; 589 } 590 return false; 591 } 592 593 partition_iterator &operator++() { 594 advance(); 595 return *this; 596 } 597 598 Partition &operator*() { return P; } 599 }; 600 601 /// A forward range over the partitions of the alloca's slices. 602 /// 603 /// This accesses an iterator range over the partitions of the alloca's 604 /// slices. It computes these partitions on the fly based on the overlapping 605 /// offsets of the slices and the ability to split them. It will visit "empty" 606 /// partitions to cover regions of the alloca only accessed via split 607 /// slices. 608 iterator_range<AllocaSlices::partition_iterator> AllocaSlices::partitions() { 609 return make_range(partition_iterator(begin(), end()), 610 partition_iterator(end(), end())); 611 } 612 613 static Value *foldSelectInst(SelectInst &SI) { 614 // If the condition being selected on is a constant or the same value is 615 // being selected between, fold the select. Yes this does (rarely) happen 616 // early on. 617 if (ConstantInt *CI = dyn_cast<ConstantInt>(SI.getCondition())) 618 return SI.getOperand(1 + CI->isZero()); 619 if (SI.getOperand(1) == SI.getOperand(2)) 620 return SI.getOperand(1); 621 622 return nullptr; 623 } 624 625 /// A helper that folds a PHI node or a select. 626 static Value *foldPHINodeOrSelectInst(Instruction &I) { 627 if (PHINode *PN = dyn_cast<PHINode>(&I)) { 628 // If PN merges together the same value, return that value. 
629 return PN->hasConstantValue(); 630 } 631 return foldSelectInst(cast<SelectInst>(I)); 632 } 633 634 /// Builder for the alloca slices. 635 /// 636 /// This class builds a set of alloca slices by recursively visiting the uses 637 /// of an alloca and making a slice for each load and store at each offset. 638 class AllocaSlices::SliceBuilder : public PtrUseVisitor<SliceBuilder> { 639 friend class PtrUseVisitor<SliceBuilder>; 640 friend class InstVisitor<SliceBuilder>; 641 642 using Base = PtrUseVisitor<SliceBuilder>; 643 644 const uint64_t AllocSize; 645 AllocaSlices &AS; 646 647 SmallDenseMap<Instruction *, unsigned> MemTransferSliceMap; 648 SmallDenseMap<Instruction *, uint64_t> PHIOrSelectSizes; 649 650 /// Set to de-duplicate dead instructions found in the use walk. 651 SmallPtrSet<Instruction *, 4> VisitedDeadInsts; 652 653 public: 654 SliceBuilder(const DataLayout &DL, AllocaInst &AI, AllocaSlices &AS) 655 : PtrUseVisitor<SliceBuilder>(DL), 656 AllocSize(DL.getTypeAllocSize(AI.getAllocatedType()).getFixedSize()), 657 AS(AS) {} 658 659 private: 660 void markAsDead(Instruction &I) { 661 if (VisitedDeadInsts.insert(&I).second) 662 AS.DeadUsers.push_back(&I); 663 } 664 665 void insertUse(Instruction &I, const APInt &Offset, uint64_t Size, 666 bool IsSplittable = false) { 667 // Completely skip uses which have a zero size or start either before or 668 // past the end of the allocation. 669 if (Size == 0 || Offset.uge(AllocSize)) { 670 LLVM_DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @" 671 << Offset 672 << " which has zero size or starts outside of the " 673 << AllocSize << " byte alloca:\n" 674 << " alloca: " << AS.AI << "\n" 675 << " use: " << I << "\n"); 676 return markAsDead(I); 677 } 678 679 uint64_t BeginOffset = Offset.getZExtValue(); 680 uint64_t EndOffset = BeginOffset + Size; 681 682 // Clamp the end offset to the end of the allocation. Note that this is 683 // formulated to handle even the case where "BeginOffset + Size" overflows. 684 // This may appear superficially to be something we could ignore entirely, 685 // but that is not so! There may be widened loads or PHI-node uses where 686 // some instructions are dead but not others. We can't completely ignore 687 // them, and so have to record at least the information here. 688 assert(AllocSize >= BeginOffset); // Established above. 689 if (Size > AllocSize - BeginOffset) { 690 LLVM_DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @" 691 << Offset << " to remain within the " << AllocSize 692 << " byte alloca:\n" 693 << " alloca: " << AS.AI << "\n" 694 << " use: " << I << "\n"); 695 EndOffset = AllocSize; 696 } 697 698 AS.Slices.push_back(Slice(BeginOffset, EndOffset, U, IsSplittable)); 699 } 700 701 void visitBitCastInst(BitCastInst &BC) { 702 if (BC.use_empty()) 703 return markAsDead(BC); 704 705 return Base::visitBitCastInst(BC); 706 } 707 708 void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) { 709 if (ASC.use_empty()) 710 return markAsDead(ASC); 711 712 return Base::visitAddrSpaceCastInst(ASC); 713 } 714 715 void visitGetElementPtrInst(GetElementPtrInst &GEPI) { 716 if (GEPI.use_empty()) 717 return markAsDead(GEPI); 718 719 if (SROAStrictInbounds && GEPI.isInBounds()) { 720 // FIXME: This is a manually un-factored variant of the basic code inside 721 // of GEPs with checking of the inbounds invariant specified in the 722 // langref in a very strict sense. 
If we ever want to enable 723 // SROAStrictInbounds, this code should be factored cleanly into 724 // PtrUseVisitor, but it is easier to experiment with SROAStrictInbounds 725 // by writing out the code here where we have the underlying allocation 726 // size readily available. 727 APInt GEPOffset = Offset; 728 const DataLayout &DL = GEPI.getModule()->getDataLayout(); 729 for (gep_type_iterator GTI = gep_type_begin(GEPI), 730 GTE = gep_type_end(GEPI); 731 GTI != GTE; ++GTI) { 732 ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand()); 733 if (!OpC) 734 break; 735 736 // Handle a struct index, which adds its field offset to the pointer. 737 if (StructType *STy = GTI.getStructTypeOrNull()) { 738 unsigned ElementIdx = OpC->getZExtValue(); 739 const StructLayout *SL = DL.getStructLayout(STy); 740 GEPOffset += 741 APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx)); 742 } else { 743 // For array or vector indices, scale the index by the size of the 744 // type. 745 APInt Index = OpC->getValue().sextOrTrunc(Offset.getBitWidth()); 746 GEPOffset += 747 Index * 748 APInt(Offset.getBitWidth(), 749 DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize()); 750 } 751 752 // If this index has computed an intermediate pointer which is not 753 // inbounds, then the result of the GEP is a poison value and we can 754 // delete it and all uses. 755 if (GEPOffset.ugt(AllocSize)) 756 return markAsDead(GEPI); 757 } 758 } 759 760 return Base::visitGetElementPtrInst(GEPI); 761 } 762 763 void handleLoadOrStore(Type *Ty, Instruction &I, const APInt &Offset, 764 uint64_t Size, bool IsVolatile) { 765 // We allow splitting of non-volatile loads and stores where the type is an 766 // integer type. These may be used to implement 'memcpy' or other "transfer 767 // of bits" patterns. 768 bool IsSplittable = 769 Ty->isIntegerTy() && !IsVolatile && DL.typeSizeEqualsStoreSize(Ty); 770 771 insertUse(I, Offset, Size, IsSplittable); 772 } 773 774 void visitLoadInst(LoadInst &LI) { 775 assert((!LI.isSimple() || LI.getType()->isSingleValueType()) && 776 "All simple FCA loads should have been pre-split"); 777 778 if (!IsOffsetKnown) 779 return PI.setAborted(&LI); 780 781 if (LI.isVolatile() && 782 LI.getPointerAddressSpace() != DL.getAllocaAddrSpace()) 783 return PI.setAborted(&LI); 784 785 if (isa<ScalableVectorType>(LI.getType())) 786 return PI.setAborted(&LI); 787 788 uint64_t Size = DL.getTypeStoreSize(LI.getType()).getFixedSize(); 789 return handleLoadOrStore(LI.getType(), LI, Offset, Size, LI.isVolatile()); 790 } 791 792 void visitStoreInst(StoreInst &SI) { 793 Value *ValOp = SI.getValueOperand(); 794 if (ValOp == *U) 795 return PI.setEscapedAndAborted(&SI); 796 if (!IsOffsetKnown) 797 return PI.setAborted(&SI); 798 799 if (SI.isVolatile() && 800 SI.getPointerAddressSpace() != DL.getAllocaAddrSpace()) 801 return PI.setAborted(&SI); 802 803 if (isa<ScalableVectorType>(ValOp->getType())) 804 return PI.setAborted(&SI); 805 806 uint64_t Size = DL.getTypeStoreSize(ValOp->getType()).getFixedSize(); 807 808 // If this memory access can be shown to *statically* extend outside the 809 // bounds of the allocation, it's behavior is undefined, so simply 810 // ignore it. Note that this is more strict than the generic clamping 811 // behavior of insertUse. We also try to handle cases which might run the 812 // risk of overflow. 813 // FIXME: We should instead consider the pointer to have escaped if this 814 // function is being instrumented for addressing bugs or race conditions. 
815 if (Size > AllocSize || Offset.ugt(AllocSize - Size)) { 816 LLVM_DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte store @" 817 << Offset << " which extends past the end of the " 818 << AllocSize << " byte alloca:\n" 819 << " alloca: " << AS.AI << "\n" 820 << " use: " << SI << "\n"); 821 return markAsDead(SI); 822 } 823 824 assert((!SI.isSimple() || ValOp->getType()->isSingleValueType()) && 825 "All simple FCA stores should have been pre-split"); 826 handleLoadOrStore(ValOp->getType(), SI, Offset, Size, SI.isVolatile()); 827 } 828 829 void visitMemSetInst(MemSetInst &II) { 830 assert(II.getRawDest() == *U && "Pointer use is not the destination?"); 831 ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength()); 832 if ((Length && Length->getValue() == 0) || 833 (IsOffsetKnown && Offset.uge(AllocSize))) 834 // Zero-length mem transfer intrinsics can be ignored entirely. 835 return markAsDead(II); 836 837 if (!IsOffsetKnown) 838 return PI.setAborted(&II); 839 840 // Don't replace this with a store with a different address space. TODO: 841 // Use a store with the casted new alloca? 842 if (II.isVolatile() && II.getDestAddressSpace() != DL.getAllocaAddrSpace()) 843 return PI.setAborted(&II); 844 845 insertUse(II, Offset, Length ? Length->getLimitedValue() 846 : AllocSize - Offset.getLimitedValue(), 847 (bool)Length); 848 } 849 850 void visitMemTransferInst(MemTransferInst &II) { 851 ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength()); 852 if (Length && Length->getValue() == 0) 853 // Zero-length mem transfer intrinsics can be ignored entirely. 854 return markAsDead(II); 855 856 // Because we can visit these intrinsics twice, also check to see if the 857 // first time marked this instruction as dead. If so, skip it. 858 if (VisitedDeadInsts.count(&II)) 859 return; 860 861 if (!IsOffsetKnown) 862 return PI.setAborted(&II); 863 864 // Don't replace this with a load/store with a different address space. 865 // TODO: Use a store with the casted new alloca? 866 if (II.isVolatile() && 867 (II.getDestAddressSpace() != DL.getAllocaAddrSpace() || 868 II.getSourceAddressSpace() != DL.getAllocaAddrSpace())) 869 return PI.setAborted(&II); 870 871 // This side of the transfer is completely out-of-bounds, and so we can 872 // nuke the entire transfer. However, we also need to nuke the other side 873 // if already added to our partitions. 874 // FIXME: Yet another place we really should bypass this when 875 // instrumenting for ASan. 876 if (Offset.uge(AllocSize)) { 877 SmallDenseMap<Instruction *, unsigned>::iterator MTPI = 878 MemTransferSliceMap.find(&II); 879 if (MTPI != MemTransferSliceMap.end()) 880 AS.Slices[MTPI->second].kill(); 881 return markAsDead(II); 882 } 883 884 uint64_t RawOffset = Offset.getLimitedValue(); 885 uint64_t Size = Length ? Length->getLimitedValue() : AllocSize - RawOffset; 886 887 // Check for the special case where the same exact value is used for both 888 // source and dest. 889 if (*U == II.getRawDest() && *U == II.getRawSource()) { 890 // For non-volatile transfers this is a no-op. 891 if (!II.isVolatile()) 892 return markAsDead(II); 893 894 return insertUse(II, Offset, Size, /*IsSplittable=*/false); 895 } 896 897 // If we have seen both source and destination for a mem transfer, then 898 // they both point to the same alloca. 
899 bool Inserted; 900 SmallDenseMap<Instruction *, unsigned>::iterator MTPI; 901 std::tie(MTPI, Inserted) = 902 MemTransferSliceMap.insert(std::make_pair(&II, AS.Slices.size())); 903 unsigned PrevIdx = MTPI->second; 904 if (!Inserted) { 905 Slice &PrevP = AS.Slices[PrevIdx]; 906 907 // Check if the begin offsets match and this is a non-volatile transfer. 908 // In that case, we can completely elide the transfer. 909 if (!II.isVolatile() && PrevP.beginOffset() == RawOffset) { 910 PrevP.kill(); 911 return markAsDead(II); 912 } 913 914 // Otherwise we have an offset transfer within the same alloca. We can't 915 // split those. 916 PrevP.makeUnsplittable(); 917 } 918 919 // Insert the use now that we've fixed up the splittable nature. 920 insertUse(II, Offset, Size, /*IsSplittable=*/Inserted && Length); 921 922 // Check that we ended up with a valid index in the map. 923 assert(AS.Slices[PrevIdx].getUse()->getUser() == &II && 924 "Map index doesn't point back to a slice with this user."); 925 } 926 927 // Disable SRoA for any intrinsics except for lifetime invariants and 928 // invariant group. 929 // FIXME: What about debug intrinsics? This matches old behavior, but 930 // doesn't make sense. 931 void visitIntrinsicInst(IntrinsicInst &II) { 932 if (II.isDroppable()) { 933 AS.DeadUseIfPromotable.push_back(U); 934 return; 935 } 936 937 if (!IsOffsetKnown) 938 return PI.setAborted(&II); 939 940 if (II.isLifetimeStartOrEnd()) { 941 ConstantInt *Length = cast<ConstantInt>(II.getArgOperand(0)); 942 uint64_t Size = std::min(AllocSize - Offset.getLimitedValue(), 943 Length->getLimitedValue()); 944 insertUse(II, Offset, Size, true); 945 return; 946 } 947 948 if (II.isLaunderOrStripInvariantGroup()) { 949 enqueueUsers(II); 950 return; 951 } 952 953 Base::visitIntrinsicInst(II); 954 } 955 956 Instruction *hasUnsafePHIOrSelectUse(Instruction *Root, uint64_t &Size) { 957 // We consider any PHI or select that results in a direct load or store of 958 // the same offset to be a viable use for slicing purposes. These uses 959 // are considered unsplittable and the size is the maximum loaded or stored 960 // size. 961 SmallPtrSet<Instruction *, 4> Visited; 962 SmallVector<std::pair<Instruction *, Instruction *>, 4> Uses; 963 Visited.insert(Root); 964 Uses.push_back(std::make_pair(cast<Instruction>(*U), Root)); 965 const DataLayout &DL = Root->getModule()->getDataLayout(); 966 // If there are no loads or stores, the access is dead. We mark that as 967 // a size zero access. 
968 Size = 0; 969 do { 970 Instruction *I, *UsedI; 971 std::tie(UsedI, I) = Uses.pop_back_val(); 972 973 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 974 Size = std::max(Size, 975 DL.getTypeStoreSize(LI->getType()).getFixedSize()); 976 continue; 977 } 978 if (StoreInst *SI = dyn_cast<StoreInst>(I)) { 979 Value *Op = SI->getOperand(0); 980 if (Op == UsedI) 981 return SI; 982 Size = std::max(Size, 983 DL.getTypeStoreSize(Op->getType()).getFixedSize()); 984 continue; 985 } 986 987 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) { 988 if (!GEP->hasAllZeroIndices()) 989 return GEP; 990 } else if (!isa<BitCastInst>(I) && !isa<PHINode>(I) && 991 !isa<SelectInst>(I) && !isa<AddrSpaceCastInst>(I)) { 992 return I; 993 } 994 995 for (User *U : I->users()) 996 if (Visited.insert(cast<Instruction>(U)).second) 997 Uses.push_back(std::make_pair(I, cast<Instruction>(U))); 998 } while (!Uses.empty()); 999 1000 return nullptr; 1001 } 1002 1003 void visitPHINodeOrSelectInst(Instruction &I) { 1004 assert(isa<PHINode>(I) || isa<SelectInst>(I)); 1005 if (I.use_empty()) 1006 return markAsDead(I); 1007 1008 // If this is a PHI node before a catchswitch, we cannot insert any non-PHI 1009 // instructions in this BB, which may be required during rewriting. Bail out 1010 // on these cases. 1011 if (isa<PHINode>(I) && 1012 I.getParent()->getFirstInsertionPt() == I.getParent()->end()) 1013 return PI.setAborted(&I); 1014 1015 // TODO: We could use SimplifyInstruction here to fold PHINodes and 1016 // SelectInsts. However, doing so requires to change the current 1017 // dead-operand-tracking mechanism. For instance, suppose neither loading 1018 // from %U nor %other traps. Then "load (select undef, %U, %other)" does not 1019 // trap either. However, if we simply replace %U with undef using the 1020 // current dead-operand-tracking mechanism, "load (select undef, undef, 1021 // %other)" may trap because the select may return the first operand 1022 // "undef". 1023 if (Value *Result = foldPHINodeOrSelectInst(I)) { 1024 if (Result == *U) 1025 // If the result of the constant fold will be the pointer, recurse 1026 // through the PHI/select as if we had RAUW'ed it. 1027 enqueueUsers(I); 1028 else 1029 // Otherwise the operand to the PHI/select is dead, and we can replace 1030 // it with poison. 1031 AS.DeadOperands.push_back(U); 1032 1033 return; 1034 } 1035 1036 if (!IsOffsetKnown) 1037 return PI.setAborted(&I); 1038 1039 // See if we already have computed info on this node. 1040 uint64_t &Size = PHIOrSelectSizes[&I]; 1041 if (!Size) { 1042 // This is a new PHI/Select, check for an unsafe use of it. 1043 if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&I, Size)) 1044 return PI.setAborted(UnsafeI); 1045 } 1046 1047 // For PHI and select operands outside the alloca, we can't nuke the entire 1048 // phi or select -- the other side might still be relevant, so we special 1049 // case them here and use a separate structure to track the operands 1050 // themselves which should be replaced with poison. 1051 // FIXME: This should instead be escaped in the event we're instrumenting 1052 // for address sanitization. 1053 if (Offset.uge(AllocSize)) { 1054 AS.DeadOperands.push_back(U); 1055 return; 1056 } 1057 1058 insertUse(I, Offset, Size); 1059 } 1060 1061 void visitPHINode(PHINode &PN) { visitPHINodeOrSelectInst(PN); } 1062 1063 void visitSelectInst(SelectInst &SI) { visitPHINodeOrSelectInst(SI); } 1064 1065 /// Disable SROA entirely if there are unhandled users of the alloca. 
1066 void visitInstruction(Instruction &I) { PI.setAborted(&I); } 1067 }; 1068 1069 AllocaSlices::AllocaSlices(const DataLayout &DL, AllocaInst &AI) 1070 : 1071 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1072 AI(AI), 1073 #endif 1074 PointerEscapingInstr(nullptr) { 1075 SliceBuilder PB(DL, AI, *this); 1076 SliceBuilder::PtrInfo PtrI = PB.visitPtr(AI); 1077 if (PtrI.isEscaped() || PtrI.isAborted()) { 1078 // FIXME: We should sink the escape vs. abort info into the caller nicely, 1079 // possibly by just storing the PtrInfo in the AllocaSlices. 1080 PointerEscapingInstr = PtrI.getEscapingInst() ? PtrI.getEscapingInst() 1081 : PtrI.getAbortingInst(); 1082 assert(PointerEscapingInstr && "Did not track a bad instruction"); 1083 return; 1084 } 1085 1086 llvm::erase_if(Slices, [](const Slice &S) { return S.isDead(); }); 1087 1088 // Sort the uses. This arranges for the offsets to be in ascending order, 1089 // and the sizes to be in descending order. 1090 llvm::stable_sort(Slices); 1091 } 1092 1093 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1094 1095 void AllocaSlices::print(raw_ostream &OS, const_iterator I, 1096 StringRef Indent) const { 1097 printSlice(OS, I, Indent); 1098 OS << "\n"; 1099 printUse(OS, I, Indent); 1100 } 1101 1102 void AllocaSlices::printSlice(raw_ostream &OS, const_iterator I, 1103 StringRef Indent) const { 1104 OS << Indent << "[" << I->beginOffset() << "," << I->endOffset() << ")" 1105 << " slice #" << (I - begin()) 1106 << (I->isSplittable() ? " (splittable)" : ""); 1107 } 1108 1109 void AllocaSlices::printUse(raw_ostream &OS, const_iterator I, 1110 StringRef Indent) const { 1111 OS << Indent << " used by: " << *I->getUse()->getUser() << "\n"; 1112 } 1113 1114 void AllocaSlices::print(raw_ostream &OS) const { 1115 if (PointerEscapingInstr) { 1116 OS << "Can't analyze slices for alloca: " << AI << "\n" 1117 << " A pointer to this alloca escaped by:\n" 1118 << " " << *PointerEscapingInstr << "\n"; 1119 return; 1120 } 1121 1122 OS << "Slices of alloca: " << AI << "\n"; 1123 for (const_iterator I = begin(), E = end(); I != E; ++I) 1124 print(OS, I); 1125 } 1126 1127 LLVM_DUMP_METHOD void AllocaSlices::dump(const_iterator I) const { 1128 print(dbgs(), I); 1129 } 1130 LLVM_DUMP_METHOD void AllocaSlices::dump() const { print(dbgs()); } 1131 1132 #endif // !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1133 1134 /// Walk the range of a partitioning looking for a common type to cover this 1135 /// sequence of slices. 1136 static std::pair<Type *, IntegerType *> 1137 findCommonType(AllocaSlices::const_iterator B, AllocaSlices::const_iterator E, 1138 uint64_t EndOffset) { 1139 Type *Ty = nullptr; 1140 bool TyIsCommon = true; 1141 IntegerType *ITy = nullptr; 1142 1143 // Note that we need to look at *every* alloca slice's Use to ensure we 1144 // always get consistent results regardless of the order of slices. 1145 for (AllocaSlices::const_iterator I = B; I != E; ++I) { 1146 Use *U = I->getUse(); 1147 if (isa<IntrinsicInst>(*U->getUser())) 1148 continue; 1149 if (I->beginOffset() != B->beginOffset() || I->endOffset() != EndOffset) 1150 continue; 1151 1152 Type *UserTy = nullptr; 1153 if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) { 1154 UserTy = LI->getType(); 1155 } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) { 1156 UserTy = SI->getValueOperand()->getType(); 1157 } 1158 1159 if (IntegerType *UserITy = dyn_cast_or_null<IntegerType>(UserTy)) { 1160 // If the type is larger than the partition, skip it. 
We only encounter 1161 // this for split integer operations where we want to use the type of the 1162 // entity causing the split. Also skip if the type is not a byte width 1163 // multiple. 1164 if (UserITy->getBitWidth() % 8 != 0 || 1165 UserITy->getBitWidth() / 8 > (EndOffset - B->beginOffset())) 1166 continue; 1167 1168 // Track the largest bitwidth integer type used in this way in case there 1169 // is no common type. 1170 if (!ITy || ITy->getBitWidth() < UserITy->getBitWidth()) 1171 ITy = UserITy; 1172 } 1173 1174 // To avoid depending on the order of slices, Ty and TyIsCommon must not 1175 // depend on types skipped above. 1176 if (!UserTy || (Ty && Ty != UserTy)) 1177 TyIsCommon = false; // Give up on anything but an iN type. 1178 else 1179 Ty = UserTy; 1180 } 1181 1182 return {TyIsCommon ? Ty : nullptr, ITy}; 1183 } 1184 1185 /// PHI instructions that use an alloca and are subsequently loaded can be 1186 /// rewritten to load both input pointers in the pred blocks and then PHI the 1187 /// results, allowing the load of the alloca to be promoted. 1188 /// From this: 1189 /// %P2 = phi [i32* %Alloca, i32* %Other] 1190 /// %V = load i32* %P2 1191 /// to: 1192 /// %V1 = load i32* %Alloca -> will be mem2reg'd 1193 /// ... 1194 /// %V2 = load i32* %Other 1195 /// ... 1196 /// %V = phi [i32 %V1, i32 %V2] 1197 /// 1198 /// We can do this to a select if its only uses are loads and if the operands 1199 /// to the select can be loaded unconditionally. 1200 /// 1201 /// FIXME: This should be hoisted into a generic utility, likely in 1202 /// Transforms/Util/Local.h 1203 static bool isSafePHIToSpeculate(PHINode &PN) { 1204 const DataLayout &DL = PN.getModule()->getDataLayout(); 1205 1206 // For now, we can only do this promotion if the load is in the same block 1207 // as the PHI, and if there are no stores between the phi and load. 1208 // TODO: Allow recursive phi users. 1209 // TODO: Allow stores. 1210 BasicBlock *BB = PN.getParent(); 1211 Align MaxAlign; 1212 uint64_t APWidth = DL.getIndexTypeSizeInBits(PN.getType()); 1213 APInt MaxSize(APWidth, 0); 1214 bool HaveLoad = false; 1215 for (User *U : PN.users()) { 1216 LoadInst *LI = dyn_cast<LoadInst>(U); 1217 if (!LI || !LI->isSimple()) 1218 return false; 1219 1220 // For now we only allow loads in the same block as the PHI. This is 1221 // a common case that happens when instcombine merges two loads through 1222 // a PHI. 1223 if (LI->getParent() != BB) 1224 return false; 1225 1226 // Ensure that there are no instructions between the PHI and the load that 1227 // could store. 1228 for (BasicBlock::iterator BBI(PN); &*BBI != LI; ++BBI) 1229 if (BBI->mayWriteToMemory()) 1230 return false; 1231 1232 uint64_t Size = DL.getTypeStoreSize(LI->getType()).getFixedSize(); 1233 MaxAlign = std::max(MaxAlign, LI->getAlign()); 1234 MaxSize = MaxSize.ult(Size) ? APInt(APWidth, Size) : MaxSize; 1235 HaveLoad = true; 1236 } 1237 1238 if (!HaveLoad) 1239 return false; 1240 1241 // We can only transform this if it is safe to push the loads into the 1242 // predecessor blocks. The only thing to watch out for is that we can't put 1243 // a possibly trapping load in the predecessor if it is a critical edge. 
1244 for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) { 1245 Instruction *TI = PN.getIncomingBlock(Idx)->getTerminator(); 1246 Value *InVal = PN.getIncomingValue(Idx); 1247 1248 // If the value is produced by the terminator of the predecessor (an 1249 // invoke) or it has side-effects, there is no valid place to put a load 1250 // in the predecessor. 1251 if (TI == InVal || TI->mayHaveSideEffects()) 1252 return false; 1253 1254 // If the predecessor has a single successor, then the edge isn't 1255 // critical. 1256 if (TI->getNumSuccessors() == 1) 1257 continue; 1258 1259 // If this pointer is always safe to load, or if we can prove that there 1260 // is already a load in the block, then we can move the load to the pred 1261 // block. 1262 if (isSafeToLoadUnconditionally(InVal, MaxAlign, MaxSize, DL, TI)) 1263 continue; 1264 1265 return false; 1266 } 1267 1268 return true; 1269 } 1270 1271 static void speculatePHINodeLoads(IRBuilderTy &IRB, PHINode &PN) { 1272 LLVM_DEBUG(dbgs() << " original: " << PN << "\n"); 1273 1274 LoadInst *SomeLoad = cast<LoadInst>(PN.user_back()); 1275 Type *LoadTy = SomeLoad->getType(); 1276 IRB.SetInsertPoint(&PN); 1277 PHINode *NewPN = IRB.CreatePHI(LoadTy, PN.getNumIncomingValues(), 1278 PN.getName() + ".sroa.speculated"); 1279 1280 // Get the AA tags and alignment to use from one of the loads. It does not 1281 // matter which one we get and if any differ. 1282 AAMDNodes AATags = SomeLoad->getAAMetadata(); 1283 Align Alignment = SomeLoad->getAlign(); 1284 1285 // Rewrite all loads of the PN to use the new PHI. 1286 while (!PN.use_empty()) { 1287 LoadInst *LI = cast<LoadInst>(PN.user_back()); 1288 LI->replaceAllUsesWith(NewPN); 1289 LI->eraseFromParent(); 1290 } 1291 1292 // Inject loads into all of the pred blocks. 1293 DenseMap<BasicBlock*, Value*> InjectedLoads; 1294 for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) { 1295 BasicBlock *Pred = PN.getIncomingBlock(Idx); 1296 Value *InVal = PN.getIncomingValue(Idx); 1297 1298 // A PHI node is allowed to have multiple (duplicated) entries for the same 1299 // basic block, as long as the value is the same. So if we already injected 1300 // a load in the predecessor, then we should reuse the same load for all 1301 // duplicated entries. 1302 if (Value* V = InjectedLoads.lookup(Pred)) { 1303 NewPN->addIncoming(V, Pred); 1304 continue; 1305 } 1306 1307 Instruction *TI = Pred->getTerminator(); 1308 IRB.SetInsertPoint(TI); 1309 1310 LoadInst *Load = IRB.CreateAlignedLoad( 1311 LoadTy, InVal, Alignment, 1312 (PN.getName() + ".sroa.speculate.load." + Pred->getName())); 1313 ++NumLoadsSpeculated; 1314 if (AATags) 1315 Load->setAAMetadata(AATags); 1316 NewPN->addIncoming(Load, Pred); 1317 InjectedLoads[Pred] = Load; 1318 } 1319 1320 LLVM_DEBUG(dbgs() << " speculated to: " << *NewPN << "\n"); 1321 PN.eraseFromParent(); 1322 } 1323 1324 /// Select instructions that use an alloca and are subsequently loaded can be 1325 /// rewritten to load both input pointers and then select between the result, 1326 /// allowing the load of the alloca to be promoted. 1327 /// From this: 1328 /// %P2 = select i1 %cond, i32* %Alloca, i32* %Other 1329 /// %V = load i32* %P2 1330 /// to: 1331 /// %V1 = load i32* %Alloca -> will be mem2reg'd 1332 /// %V2 = load i32* %Other 1333 /// %V = select i1 %cond, i32 %V1, i32 %V2 1334 /// 1335 /// We can do this to a select if its only uses are loads and if the operand 1336 /// to the select can be loaded unconditionally. 
If found an intervening bitcast 1337 /// with a single use of the load, allow the promotion. 1338 static bool isSafeSelectToSpeculate(SelectInst &SI) { 1339 Value *TValue = SI.getTrueValue(); 1340 Value *FValue = SI.getFalseValue(); 1341 const DataLayout &DL = SI.getModule()->getDataLayout(); 1342 1343 for (User *U : SI.users()) { 1344 LoadInst *LI; 1345 BitCastInst *BC = dyn_cast<BitCastInst>(U); 1346 if (BC && BC->hasOneUse()) 1347 LI = dyn_cast<LoadInst>(*BC->user_begin()); 1348 else 1349 LI = dyn_cast<LoadInst>(U); 1350 1351 if (!LI || !LI->isSimple()) 1352 return false; 1353 1354 // Both operands to the select need to be dereferenceable, either 1355 // absolutely (e.g. allocas) or at this point because we can see other 1356 // accesses to it. 1357 if (!isSafeToLoadUnconditionally(TValue, LI->getType(), 1358 LI->getAlign(), DL, LI)) 1359 return false; 1360 if (!isSafeToLoadUnconditionally(FValue, LI->getType(), 1361 LI->getAlign(), DL, LI)) 1362 return false; 1363 } 1364 1365 return true; 1366 } 1367 1368 static void speculateSelectInstLoads(IRBuilderTy &IRB, SelectInst &SI) { 1369 LLVM_DEBUG(dbgs() << " original: " << SI << "\n"); 1370 1371 IRB.SetInsertPoint(&SI); 1372 Value *TV = SI.getTrueValue(); 1373 Value *FV = SI.getFalseValue(); 1374 // Replace the loads of the select with a select of two loads. 1375 while (!SI.use_empty()) { 1376 LoadInst *LI; 1377 BitCastInst *BC = dyn_cast<BitCastInst>(SI.user_back()); 1378 if (BC) { 1379 assert(BC->hasOneUse() && "Bitcast should have a single use."); 1380 LI = cast<LoadInst>(BC->user_back()); 1381 } else { 1382 LI = cast<LoadInst>(SI.user_back()); 1383 } 1384 1385 assert(LI->isSimple() && "We only speculate simple loads"); 1386 1387 IRB.SetInsertPoint(LI); 1388 Value *NewTV = 1389 BC ? IRB.CreateBitCast(TV, BC->getType(), TV->getName() + ".sroa.cast") 1390 : TV; 1391 Value *NewFV = 1392 BC ? IRB.CreateBitCast(FV, BC->getType(), FV->getName() + ".sroa.cast") 1393 : FV; 1394 LoadInst *TL = IRB.CreateLoad(LI->getType(), NewTV, 1395 LI->getName() + ".sroa.speculate.load.true"); 1396 LoadInst *FL = IRB.CreateLoad(LI->getType(), NewFV, 1397 LI->getName() + ".sroa.speculate.load.false"); 1398 NumLoadsSpeculated += 2; 1399 1400 // Transfer alignment and AA info if present. 1401 TL->setAlignment(LI->getAlign()); 1402 FL->setAlignment(LI->getAlign()); 1403 1404 AAMDNodes Tags = LI->getAAMetadata(); 1405 if (Tags) { 1406 TL->setAAMetadata(Tags); 1407 FL->setAAMetadata(Tags); 1408 } 1409 1410 Value *V = IRB.CreateSelect(SI.getCondition(), TL, FL, 1411 LI->getName() + ".sroa.speculated"); 1412 1413 LLVM_DEBUG(dbgs() << " speculated to: " << *V << "\n"); 1414 LI->replaceAllUsesWith(V); 1415 LI->eraseFromParent(); 1416 if (BC) 1417 BC->eraseFromParent(); 1418 } 1419 SI.eraseFromParent(); 1420 } 1421 1422 /// Build a GEP out of a base pointer and indices. 1423 /// 1424 /// This will return the BasePtr if that is valid, or build a new GEP 1425 /// instruction using the IRBuilder if GEP-ing is needed. 1426 static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr, 1427 SmallVectorImpl<Value *> &Indices, 1428 const Twine &NamePrefix) { 1429 if (Indices.empty()) 1430 return BasePtr; 1431 1432 // A single zero index is a no-op, so check for this and avoid building a GEP 1433 // in that case. 1434 if (Indices.size() == 1 && cast<ConstantInt>(Indices.back())->isZero()) 1435 return BasePtr; 1436 1437 // buildGEP() is only called for non-opaque pointers. 
1438 return IRB.CreateInBoundsGEP( 1439 BasePtr->getType()->getNonOpaquePointerElementType(), BasePtr, Indices, 1440 NamePrefix + "sroa_idx"); 1441 } 1442 1443 /// Get a natural GEP off of the BasePtr walking through Ty toward 1444 /// TargetTy without changing the offset of the pointer. 1445 /// 1446 /// This routine assumes we've already established a properly offset GEP with 1447 /// Indices, and arrived at the Ty type. The goal is to continue to GEP with 1448 /// zero-indices down through type layers until we find one the same as 1449 /// TargetTy. If we can't find one with the same type, we at least try to use 1450 /// one with the same size. If none of that works, we just produce the GEP as 1451 /// indicated by Indices to have the correct offset. 1452 static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL, 1453 Value *BasePtr, Type *Ty, Type *TargetTy, 1454 SmallVectorImpl<Value *> &Indices, 1455 const Twine &NamePrefix) { 1456 if (Ty == TargetTy) 1457 return buildGEP(IRB, BasePtr, Indices, NamePrefix); 1458 1459 // Offset size to use for the indices. 1460 unsigned OffsetSize = DL.getIndexTypeSizeInBits(BasePtr->getType()); 1461 1462 // See if we can descend into a struct and locate a field with the correct 1463 // type. 1464 unsigned NumLayers = 0; 1465 Type *ElementTy = Ty; 1466 do { 1467 if (ElementTy->isPointerTy()) 1468 break; 1469 1470 if (ArrayType *ArrayTy = dyn_cast<ArrayType>(ElementTy)) { 1471 ElementTy = ArrayTy->getElementType(); 1472 Indices.push_back(IRB.getIntN(OffsetSize, 0)); 1473 } else if (VectorType *VectorTy = dyn_cast<VectorType>(ElementTy)) { 1474 ElementTy = VectorTy->getElementType(); 1475 Indices.push_back(IRB.getInt32(0)); 1476 } else if (StructType *STy = dyn_cast<StructType>(ElementTy)) { 1477 if (STy->element_begin() == STy->element_end()) 1478 break; // Nothing left to descend into. 1479 ElementTy = *STy->element_begin(); 1480 Indices.push_back(IRB.getInt32(0)); 1481 } else { 1482 break; 1483 } 1484 ++NumLayers; 1485 } while (ElementTy != TargetTy); 1486 if (ElementTy != TargetTy) 1487 Indices.erase(Indices.end() - NumLayers, Indices.end()); 1488 1489 return buildGEP(IRB, BasePtr, Indices, NamePrefix); 1490 } 1491 1492 /// Get a natural GEP from a base pointer to a particular offset and 1493 /// resulting in a particular type. 1494 /// 1495 /// The goal is to produce a "natural" looking GEP that works with the existing 1496 /// composite types to arrive at the appropriate offset and element type for 1497 /// a pointer. TargetTy is the element type the returned GEP should point-to if 1498 /// possible. We recurse by decreasing Offset, adding the appropriate index to 1499 /// Indices, and setting Ty to the result subtype. 1500 /// 1501 /// If no natural GEP can be constructed, this function returns null. 1502 static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &DL, 1503 Value *Ptr, APInt Offset, Type *TargetTy, 1504 SmallVectorImpl<Value *> &Indices, 1505 const Twine &NamePrefix) { 1506 PointerType *Ty = cast<PointerType>(Ptr->getType()); 1507 1508 // Don't consider any GEPs through an i8* as natural unless the TargetTy is 1509 // an i8. 1510 if (Ty == IRB.getInt8PtrTy(Ty->getAddressSpace()) && TargetTy->isIntegerTy(8)) 1511 return nullptr; 1512 1513 Type *ElementTy = Ty->getNonOpaquePointerElementType(); 1514 if (!ElementTy->isSized()) 1515 return nullptr; // We can't GEP through an unsized element. 
1516
1517   SmallVector<APInt> IntIndices = DL.getGEPIndicesForOffset(ElementTy, Offset);
1518   if (Offset != 0)
1519     return nullptr;
1520
1521   for (const APInt &Index : IntIndices)
1522     Indices.push_back(IRB.getInt(Index));
1523   return getNaturalGEPWithType(IRB, DL, Ptr, ElementTy, TargetTy, Indices,
1524                                NamePrefix);
1525 }
1526
1527 /// Compute an adjusted pointer from Ptr by Offset bytes where the
1528 /// resulting pointer has PointerTy.
1529 ///
1530 /// This tries very hard to compute a "natural" GEP which arrives at the offset
1531 /// and produces the pointer type desired. Where it cannot, it will try to use
1532 /// the natural GEP to arrive at the offset and bitcast to the type. Where that
1533 /// fails, it will try to use an existing i8* and GEP to the byte offset and
1534 /// bitcast to the type.
1535 ///
1536 /// The strategy for finding the more natural GEPs is to peel off layers of the
1537 /// pointer, walking back through bit casts and GEPs, searching for a base
1538 /// pointer from which we can compute a natural GEP with the desired
1539 /// properties. The algorithm tries to fold as many constant indices into
1540 /// a single GEP as possible, thus making each GEP more independent of the
1541 /// surrounding code.
1542 static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr,
1543                              APInt Offset, Type *PointerTy,
1544                              const Twine &NamePrefix) {
1545   // Create i8 GEP for opaque pointers.
1546   if (Ptr->getType()->isOpaquePointerTy()) {
1547     if (Offset != 0)
1548       Ptr = IRB.CreateInBoundsGEP(IRB.getInt8Ty(), Ptr, IRB.getInt(Offset),
1549                                   NamePrefix + "sroa_idx");
1550     return IRB.CreatePointerBitCastOrAddrSpaceCast(Ptr, PointerTy,
1551                                                    NamePrefix + "sroa_cast");
1552   }
1553
1554   // Even though we don't look through PHI nodes, we could be called on an
1555   // instruction in an unreachable block, which may be on a cycle.
1556   SmallPtrSet<Value *, 4> Visited;
1557   Visited.insert(Ptr);
1558   SmallVector<Value *, 4> Indices;
1559
1560   // We may end up computing an offset pointer that has the wrong type. If we
1561   // are never able to compute one directly that has the correct type, we'll
1562   // fall back to it, so keep it and the base it was computed from around here.
1563   Value *OffsetPtr = nullptr;
1564   Value *OffsetBasePtr;
1565
1566   // Remember any i8 pointer we come across to re-use if we need to do a raw
1567   // byte offset.
1568   Value *Int8Ptr = nullptr;
1569   APInt Int8PtrOffset(Offset.getBitWidth(), 0);
1570
1571   PointerType *TargetPtrTy = cast<PointerType>(PointerTy);
1572   Type *TargetTy = TargetPtrTy->getNonOpaquePointerElementType();
1573
1574   // As `addrspacecast` is not necessarily a no-op cast, `Ptr` (the storage
1575   // pointer) may have a different address space from the expected `PointerTy`.
1576   // Adjust the pointer type based on the original storage pointer.
1577   auto AS = cast<PointerType>(Ptr->getType())->getAddressSpace();
1578   PointerTy = TargetTy->getPointerTo(AS);
1579
1580   do {
1581     // First fold any existing GEPs into the offset.
1582     while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
1583       APInt GEPOffset(Offset.getBitWidth(), 0);
1584       if (!GEP->accumulateConstantOffset(DL, GEPOffset))
1585         break;
1586       Offset += GEPOffset;
1587       Ptr = GEP->getPointerOperand();
1588       if (!Visited.insert(Ptr).second)
1589         break;
1590     }
1591
1592     // See if we can perform a natural GEP here.
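    // For example (illustrative, assuming a typical data layout): with a base
    // pointer of type { i32, [2 x float] }* and Offset == 8, the natural GEP is
    //   getelementptr inbounds { i32, [2 x float] }, { i32, [2 x float] }* %base, i32 0, i32 1, i32 1
    // which reaches the second float without falling back to raw i8* arithmetic.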
1593 Indices.clear(); 1594 if (Value *P = getNaturalGEPWithOffset(IRB, DL, Ptr, Offset, TargetTy, 1595 Indices, NamePrefix)) { 1596 // If we have a new natural pointer at the offset, clear out any old 1597 // offset pointer we computed. Unless it is the base pointer or 1598 // a non-instruction, we built a GEP we don't need. Zap it. 1599 if (OffsetPtr && OffsetPtr != OffsetBasePtr) 1600 if (Instruction *I = dyn_cast<Instruction>(OffsetPtr)) { 1601 assert(I->use_empty() && "Built a GEP with uses some how!"); 1602 I->eraseFromParent(); 1603 } 1604 OffsetPtr = P; 1605 OffsetBasePtr = Ptr; 1606 // If we also found a pointer of the right type, we're done. 1607 if (P->getType() == PointerTy) 1608 break; 1609 } 1610 1611 // Stash this pointer if we've found an i8*. 1612 if (Ptr->getType()->isIntegerTy(8)) { 1613 Int8Ptr = Ptr; 1614 Int8PtrOffset = Offset; 1615 } 1616 1617 // Peel off a layer of the pointer and update the offset appropriately. 1618 if (Operator::getOpcode(Ptr) == Instruction::BitCast) { 1619 Ptr = cast<Operator>(Ptr)->getOperand(0); 1620 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) { 1621 if (GA->isInterposable()) 1622 break; 1623 Ptr = GA->getAliasee(); 1624 } else { 1625 break; 1626 } 1627 assert(Ptr->getType()->isPointerTy() && "Unexpected operand type!"); 1628 } while (Visited.insert(Ptr).second); 1629 1630 if (!OffsetPtr) { 1631 if (!Int8Ptr) { 1632 Int8Ptr = IRB.CreateBitCast( 1633 Ptr, IRB.getInt8PtrTy(PointerTy->getPointerAddressSpace()), 1634 NamePrefix + "sroa_raw_cast"); 1635 Int8PtrOffset = Offset; 1636 } 1637 1638 OffsetPtr = Int8PtrOffset == 0 1639 ? Int8Ptr 1640 : IRB.CreateInBoundsGEP(IRB.getInt8Ty(), Int8Ptr, 1641 IRB.getInt(Int8PtrOffset), 1642 NamePrefix + "sroa_raw_idx"); 1643 } 1644 Ptr = OffsetPtr; 1645 1646 // On the off chance we were targeting i8*, guard the bitcast here. 1647 if (cast<PointerType>(Ptr->getType()) != TargetPtrTy) { 1648 Ptr = IRB.CreatePointerBitCastOrAddrSpaceCast(Ptr, 1649 TargetPtrTy, 1650 NamePrefix + "sroa_cast"); 1651 } 1652 1653 return Ptr; 1654 } 1655 1656 /// Compute the adjusted alignment for a load or store from an offset. 1657 static Align getAdjustedAlignment(Instruction *I, uint64_t Offset) { 1658 return commonAlignment(getLoadStoreAlignment(I), Offset); 1659 } 1660 1661 /// Test whether we can convert a value from the old to the new type. 1662 /// 1663 /// This predicate should be used to guard calls to convertValue in order to 1664 /// ensure that we only try to convert viable values. The strategy is that we 1665 /// will peel off single element struct and array wrappings to get to an 1666 /// underlying value, and convert that value. 1667 static bool canConvertValue(const DataLayout &DL, Type *OldTy, Type *NewTy) { 1668 if (OldTy == NewTy) 1669 return true; 1670 1671 // For integer types, we can't handle any bit-width differences. This would 1672 // break both vector conversions with extension and introduce endianness 1673 // issues when in conjunction with loads and stores. 1674 if (isa<IntegerType>(OldTy) && isa<IntegerType>(NewTy)) { 1675 assert(cast<IntegerType>(OldTy)->getBitWidth() != 1676 cast<IntegerType>(NewTy)->getBitWidth() && 1677 "We can't have the same bitwidth for different int types"); 1678 return false; 1679 } 1680 1681 if (DL.getTypeSizeInBits(NewTy).getFixedSize() != 1682 DL.getTypeSizeInBits(OldTy).getFixedSize()) 1683 return false; 1684 if (!NewTy->isSingleValueType() || !OldTy->isSingleValueType()) 1685 return false; 1686 1687 // We can convert pointers to integers and vice-versa. 
Same for vectors 1688 // of pointers and integers. 1689 OldTy = OldTy->getScalarType(); 1690 NewTy = NewTy->getScalarType(); 1691 if (NewTy->isPointerTy() || OldTy->isPointerTy()) { 1692 if (NewTy->isPointerTy() && OldTy->isPointerTy()) { 1693 unsigned OldAS = OldTy->getPointerAddressSpace(); 1694 unsigned NewAS = NewTy->getPointerAddressSpace(); 1695 // Convert pointers if they are pointers from the same address space or 1696 // different integral (not non-integral) address spaces with the same 1697 // pointer size. 1698 return OldAS == NewAS || 1699 (!DL.isNonIntegralAddressSpace(OldAS) && 1700 !DL.isNonIntegralAddressSpace(NewAS) && 1701 DL.getPointerSize(OldAS) == DL.getPointerSize(NewAS)); 1702 } 1703 1704 // We can convert integers to integral pointers, but not to non-integral 1705 // pointers. 1706 if (OldTy->isIntegerTy()) 1707 return !DL.isNonIntegralPointerType(NewTy); 1708 1709 // We can convert integral pointers to integers, but non-integral pointers 1710 // need to remain pointers. 1711 if (!DL.isNonIntegralPointerType(OldTy)) 1712 return NewTy->isIntegerTy(); 1713 1714 return false; 1715 } 1716 1717 return true; 1718 } 1719 1720 /// Generic routine to convert an SSA value to a value of a different 1721 /// type. 1722 /// 1723 /// This will try various different casting techniques, such as bitcasts, 1724 /// inttoptr, and ptrtoint casts. Use the \c canConvertValue predicate to test 1725 /// two types for viability with this routine. 1726 static Value *convertValue(const DataLayout &DL, IRBuilderTy &IRB, Value *V, 1727 Type *NewTy) { 1728 Type *OldTy = V->getType(); 1729 assert(canConvertValue(DL, OldTy, NewTy) && "Value not convertable to type"); 1730 1731 if (OldTy == NewTy) 1732 return V; 1733 1734 assert(!(isa<IntegerType>(OldTy) && isa<IntegerType>(NewTy)) && 1735 "Integer types must be the exact same to convert."); 1736 1737 // See if we need inttoptr for this type pair. May require additional bitcast. 1738 if (OldTy->isIntOrIntVectorTy() && NewTy->isPtrOrPtrVectorTy()) { 1739 // Expand <2 x i32> to i8* --> <2 x i32> to i64 to i8* 1740 // Expand i128 to <2 x i8*> --> i128 to <2 x i64> to <2 x i8*> 1741 // Expand <4 x i32> to <2 x i8*> --> <4 x i32> to <2 x i64> to <2 x i8*> 1742 // Directly handle i64 to i8* 1743 return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)), 1744 NewTy); 1745 } 1746 1747 // See if we need ptrtoint for this type pair. May require additional bitcast. 1748 if (OldTy->isPtrOrPtrVectorTy() && NewTy->isIntOrIntVectorTy()) { 1749 // Expand <2 x i8*> to i128 --> <2 x i8*> to <2 x i64> to i128 1750 // Expand i8* to <2 x i32> --> i8* to i64 to <2 x i32> 1751 // Expand <2 x i8*> to <4 x i32> --> <2 x i8*> to <2 x i64> to <4 x i32> 1752 // Expand i8* to i64 --> i8* to i64 to i64 1753 return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)), 1754 NewTy); 1755 } 1756 1757 if (OldTy->isPtrOrPtrVectorTy() && NewTy->isPtrOrPtrVectorTy()) { 1758 unsigned OldAS = OldTy->getPointerAddressSpace(); 1759 unsigned NewAS = NewTy->getPointerAddressSpace(); 1760 // To convert pointers with different address spaces (they are already 1761 // checked convertible, i.e. they have the same pointer size), so far we 1762 // cannot use `bitcast` (which has restrict on the same address space) or 1763 // `addrspacecast` (which is not always no-op casting). Instead, use a pair 1764 // of no-op `ptrtoint`/`inttoptr` casts through an integer with the same bit 1765 // size. 
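    // For example (illustrative): converting i8 addrspace(1)* to
    // i8 addrspace(2)* where both address spaces use 64-bit integral pointers
    // becomes
    //   %int = ptrtoint i8 addrspace(1)* %v to i64
    //   %res = inttoptr i64 %int to i8 addrspace(2)*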
1766 if (OldAS != NewAS) { 1767 assert(DL.getPointerSize(OldAS) == DL.getPointerSize(NewAS)); 1768 return IRB.CreateIntToPtr(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)), 1769 NewTy); 1770 } 1771 } 1772 1773 return IRB.CreateBitCast(V, NewTy); 1774 } 1775 1776 /// Test whether the given slice use can be promoted to a vector. 1777 /// 1778 /// This function is called to test each entry in a partition which is slated 1779 /// for a single slice. 1780 static bool isVectorPromotionViableForSlice(Partition &P, const Slice &S, 1781 VectorType *Ty, 1782 uint64_t ElementSize, 1783 const DataLayout &DL) { 1784 // First validate the slice offsets. 1785 uint64_t BeginOffset = 1786 std::max(S.beginOffset(), P.beginOffset()) - P.beginOffset(); 1787 uint64_t BeginIndex = BeginOffset / ElementSize; 1788 if (BeginIndex * ElementSize != BeginOffset || 1789 BeginIndex >= cast<FixedVectorType>(Ty)->getNumElements()) 1790 return false; 1791 uint64_t EndOffset = 1792 std::min(S.endOffset(), P.endOffset()) - P.beginOffset(); 1793 uint64_t EndIndex = EndOffset / ElementSize; 1794 if (EndIndex * ElementSize != EndOffset || 1795 EndIndex > cast<FixedVectorType>(Ty)->getNumElements()) 1796 return false; 1797 1798 assert(EndIndex > BeginIndex && "Empty vector!"); 1799 uint64_t NumElements = EndIndex - BeginIndex; 1800 Type *SliceTy = (NumElements == 1) 1801 ? Ty->getElementType() 1802 : FixedVectorType::get(Ty->getElementType(), NumElements); 1803 1804 Type *SplitIntTy = 1805 Type::getIntNTy(Ty->getContext(), NumElements * ElementSize * 8); 1806 1807 Use *U = S.getUse(); 1808 1809 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) { 1810 if (MI->isVolatile()) 1811 return false; 1812 if (!S.isSplittable()) 1813 return false; // Skip any unsplittable intrinsics. 1814 } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) { 1815 if (!II->isLifetimeStartOrEnd() && !II->isDroppable()) 1816 return false; 1817 } else if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) { 1818 if (LI->isVolatile()) 1819 return false; 1820 Type *LTy = LI->getType(); 1821 // Disable vector promotion when there are loads or stores of an FCA. 1822 if (LTy->isStructTy()) 1823 return false; 1824 if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) { 1825 assert(LTy->isIntegerTy()); 1826 LTy = SplitIntTy; 1827 } 1828 if (!canConvertValue(DL, SliceTy, LTy)) 1829 return false; 1830 } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) { 1831 if (SI->isVolatile()) 1832 return false; 1833 Type *STy = SI->getValueOperand()->getType(); 1834 // Disable vector promotion when there are loads or stores of an FCA. 1835 if (STy->isStructTy()) 1836 return false; 1837 if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) { 1838 assert(STy->isIntegerTy()); 1839 STy = SplitIntTy; 1840 } 1841 if (!canConvertValue(DL, STy, SliceTy)) 1842 return false; 1843 } else { 1844 return false; 1845 } 1846 1847 return true; 1848 } 1849 1850 /// Test whether the given alloca partitioning and range of slices can be 1851 /// promoted to a vector. 1852 /// 1853 /// This is a quick test to check whether we can rewrite a particular alloca 1854 /// partition (and its newly formed alloca) into a vector alloca with only 1855 /// whole-vector loads and stores such that it could be promoted to a vector 1856 /// SSA value. 
We only can ensure this for a limited set of operations, and we 1857 /// don't want to do the rewrites unless we are confident that the result will 1858 /// be promotable, so we have an early test here. 1859 static VectorType *isVectorPromotionViable(Partition &P, const DataLayout &DL) { 1860 // Collect the candidate types for vector-based promotion. Also track whether 1861 // we have different element types. 1862 SmallVector<VectorType *, 4> CandidateTys; 1863 Type *CommonEltTy = nullptr; 1864 bool HaveCommonEltTy = true; 1865 auto CheckCandidateType = [&](Type *Ty) { 1866 if (auto *VTy = dyn_cast<VectorType>(Ty)) { 1867 // Return if bitcast to vectors is different for total size in bits. 1868 if (!CandidateTys.empty()) { 1869 VectorType *V = CandidateTys[0]; 1870 if (DL.getTypeSizeInBits(VTy).getFixedSize() != 1871 DL.getTypeSizeInBits(V).getFixedSize()) { 1872 CandidateTys.clear(); 1873 return; 1874 } 1875 } 1876 CandidateTys.push_back(VTy); 1877 if (!CommonEltTy) 1878 CommonEltTy = VTy->getElementType(); 1879 else if (CommonEltTy != VTy->getElementType()) 1880 HaveCommonEltTy = false; 1881 } 1882 }; 1883 // Consider any loads or stores that are the exact size of the slice. 1884 for (const Slice &S : P) 1885 if (S.beginOffset() == P.beginOffset() && 1886 S.endOffset() == P.endOffset()) { 1887 if (auto *LI = dyn_cast<LoadInst>(S.getUse()->getUser())) 1888 CheckCandidateType(LI->getType()); 1889 else if (auto *SI = dyn_cast<StoreInst>(S.getUse()->getUser())) 1890 CheckCandidateType(SI->getValueOperand()->getType()); 1891 } 1892 1893 // If we didn't find a vector type, nothing to do here. 1894 if (CandidateTys.empty()) 1895 return nullptr; 1896 1897 // Remove non-integer vector types if we had multiple common element types. 1898 // FIXME: It'd be nice to replace them with integer vector types, but we can't 1899 // do that until all the backends are known to produce good code for all 1900 // integer vector types. 1901 if (!HaveCommonEltTy) { 1902 llvm::erase_if(CandidateTys, [](VectorType *VTy) { 1903 return !VTy->getElementType()->isIntegerTy(); 1904 }); 1905 1906 // If there were no integer vector types, give up. 1907 if (CandidateTys.empty()) 1908 return nullptr; 1909 1910 // Rank the remaining candidate vector types. This is easy because we know 1911 // they're all integer vectors. We sort by ascending number of elements. 1912 auto RankVectorTypes = [&DL](VectorType *RHSTy, VectorType *LHSTy) { 1913 (void)DL; 1914 assert(DL.getTypeSizeInBits(RHSTy).getFixedSize() == 1915 DL.getTypeSizeInBits(LHSTy).getFixedSize() && 1916 "Cannot have vector types of different sizes!"); 1917 assert(RHSTy->getElementType()->isIntegerTy() && 1918 "All non-integer types eliminated!"); 1919 assert(LHSTy->getElementType()->isIntegerTy() && 1920 "All non-integer types eliminated!"); 1921 return cast<FixedVectorType>(RHSTy)->getNumElements() < 1922 cast<FixedVectorType>(LHSTy)->getNumElements(); 1923 }; 1924 llvm::sort(CandidateTys, RankVectorTypes); 1925 CandidateTys.erase( 1926 std::unique(CandidateTys.begin(), CandidateTys.end(), RankVectorTypes), 1927 CandidateTys.end()); 1928 } else { 1929 // The only way to have the same element type in every vector type is to 1930 // have the same vector type. Check that and remove all but one. 
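    // (Candidates were already filtered to have the same total size in bits,
    // so a common element type implies a common element count and therefore a
    // single identical vector type.)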
1931 #ifndef NDEBUG 1932 for (VectorType *VTy : CandidateTys) { 1933 assert(VTy->getElementType() == CommonEltTy && 1934 "Unaccounted for element type!"); 1935 assert(VTy == CandidateTys[0] && 1936 "Different vector types with the same element type!"); 1937 } 1938 #endif 1939 CandidateTys.resize(1); 1940 } 1941 1942 // Try each vector type, and return the one which works. 1943 auto CheckVectorTypeForPromotion = [&](VectorType *VTy) { 1944 uint64_t ElementSize = 1945 DL.getTypeSizeInBits(VTy->getElementType()).getFixedSize(); 1946 1947 // While the definition of LLVM vectors is bitpacked, we don't support sizes 1948 // that aren't byte sized. 1949 if (ElementSize % 8) 1950 return false; 1951 assert((DL.getTypeSizeInBits(VTy).getFixedSize() % 8) == 0 && 1952 "vector size not a multiple of element size?"); 1953 ElementSize /= 8; 1954 1955 for (const Slice &S : P) 1956 if (!isVectorPromotionViableForSlice(P, S, VTy, ElementSize, DL)) 1957 return false; 1958 1959 for (const Slice *S : P.splitSliceTails()) 1960 if (!isVectorPromotionViableForSlice(P, *S, VTy, ElementSize, DL)) 1961 return false; 1962 1963 return true; 1964 }; 1965 for (VectorType *VTy : CandidateTys) 1966 if (CheckVectorTypeForPromotion(VTy)) 1967 return VTy; 1968 1969 return nullptr; 1970 } 1971 1972 /// Test whether a slice of an alloca is valid for integer widening. 1973 /// 1974 /// This implements the necessary checking for the \c isIntegerWideningViable 1975 /// test below on a single slice of the alloca. 1976 static bool isIntegerWideningViableForSlice(const Slice &S, 1977 uint64_t AllocBeginOffset, 1978 Type *AllocaTy, 1979 const DataLayout &DL, 1980 bool &WholeAllocaOp) { 1981 uint64_t Size = DL.getTypeStoreSize(AllocaTy).getFixedSize(); 1982 1983 uint64_t RelBegin = S.beginOffset() - AllocBeginOffset; 1984 uint64_t RelEnd = S.endOffset() - AllocBeginOffset; 1985 1986 // We can't reasonably handle cases where the load or store extends past 1987 // the end of the alloca's type and into its padding. 1988 if (RelEnd > Size) 1989 return false; 1990 1991 Use *U = S.getUse(); 1992 1993 if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) { 1994 if (LI->isVolatile()) 1995 return false; 1996 // We can't handle loads that extend past the allocated memory. 1997 if (DL.getTypeStoreSize(LI->getType()).getFixedSize() > Size) 1998 return false; 1999 // So far, AllocaSliceRewriter does not support widening split slice tails 2000 // in rewriteIntegerLoad. 2001 if (S.beginOffset() < AllocBeginOffset) 2002 return false; 2003 // Note that we don't count vector loads or stores as whole-alloca 2004 // operations which enable integer widening because we would prefer to use 2005 // vector widening instead. 2006 if (!isa<VectorType>(LI->getType()) && RelBegin == 0 && RelEnd == Size) 2007 WholeAllocaOp = true; 2008 if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType())) { 2009 if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy).getFixedSize()) 2010 return false; 2011 } else if (RelBegin != 0 || RelEnd != Size || 2012 !canConvertValue(DL, AllocaTy, LI->getType())) { 2013 // Non-integer loads need to be convertible from the alloca type so that 2014 // they are promotable. 2015 return false; 2016 } 2017 } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) { 2018 Type *ValueTy = SI->getValueOperand()->getType(); 2019 if (SI->isVolatile()) 2020 return false; 2021 // We can't handle stores that extend past the allocated memory. 
2022 if (DL.getTypeStoreSize(ValueTy).getFixedSize() > Size) 2023 return false; 2024 // So far, AllocaSliceRewriter does not support widening split slice tails 2025 // in rewriteIntegerStore. 2026 if (S.beginOffset() < AllocBeginOffset) 2027 return false; 2028 // Note that we don't count vector loads or stores as whole-alloca 2029 // operations which enable integer widening because we would prefer to use 2030 // vector widening instead. 2031 if (!isa<VectorType>(ValueTy) && RelBegin == 0 && RelEnd == Size) 2032 WholeAllocaOp = true; 2033 if (IntegerType *ITy = dyn_cast<IntegerType>(ValueTy)) { 2034 if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy).getFixedSize()) 2035 return false; 2036 } else if (RelBegin != 0 || RelEnd != Size || 2037 !canConvertValue(DL, ValueTy, AllocaTy)) { 2038 // Non-integer stores need to be convertible to the alloca type so that 2039 // they are promotable. 2040 return false; 2041 } 2042 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) { 2043 if (MI->isVolatile() || !isa<Constant>(MI->getLength())) 2044 return false; 2045 if (!S.isSplittable()) 2046 return false; // Skip any unsplittable intrinsics. 2047 } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) { 2048 if (!II->isLifetimeStartOrEnd() && !II->isDroppable()) 2049 return false; 2050 } else { 2051 return false; 2052 } 2053 2054 return true; 2055 } 2056 2057 /// Test whether the given alloca partition's integer operations can be 2058 /// widened to promotable ones. 2059 /// 2060 /// This is a quick test to check whether we can rewrite the integer loads and 2061 /// stores to a particular alloca into wider loads and stores and be able to 2062 /// promote the resulting alloca. 2063 static bool isIntegerWideningViable(Partition &P, Type *AllocaTy, 2064 const DataLayout &DL) { 2065 uint64_t SizeInBits = DL.getTypeSizeInBits(AllocaTy).getFixedSize(); 2066 // Don't create integer types larger than the maximum bitwidth. 2067 if (SizeInBits > IntegerType::MAX_INT_BITS) 2068 return false; 2069 2070 // Don't try to handle allocas with bit-padding. 2071 if (SizeInBits != DL.getTypeStoreSizeInBits(AllocaTy).getFixedSize()) 2072 return false; 2073 2074 // We need to ensure that an integer type with the appropriate bitwidth can 2075 // be converted to the alloca type, whatever that is. We don't want to force 2076 // the alloca itself to have an integer type if there is a more suitable one. 2077 Type *IntTy = Type::getIntNTy(AllocaTy->getContext(), SizeInBits); 2078 if (!canConvertValue(DL, AllocaTy, IntTy) || 2079 !canConvertValue(DL, IntTy, AllocaTy)) 2080 return false; 2081 2082 // While examining uses, we ensure that the alloca has a covering load or 2083 // store. We don't want to widen the integer operations only to fail to 2084 // promote due to some other unsplittable entry (which we may make splittable 2085 // later). However, if there are only splittable uses, go ahead and assume 2086 // that we cover the alloca. 2087 // FIXME: We shouldn't consider split slices that happen to start in the 2088 // partition here... 
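  // Start from "covered" only when the partition has no slices of its own
  // (it is used purely via splittable split-slice tails) and the widened
  // integer would be a legal type for the target; otherwise an explicit
  // whole-alloca load or store must be found below.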
2089 bool WholeAllocaOp = P.empty() && DL.isLegalInteger(SizeInBits); 2090 2091 for (const Slice &S : P) 2092 if (!isIntegerWideningViableForSlice(S, P.beginOffset(), AllocaTy, DL, 2093 WholeAllocaOp)) 2094 return false; 2095 2096 for (const Slice *S : P.splitSliceTails()) 2097 if (!isIntegerWideningViableForSlice(*S, P.beginOffset(), AllocaTy, DL, 2098 WholeAllocaOp)) 2099 return false; 2100 2101 return WholeAllocaOp; 2102 } 2103 2104 static Value *extractInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *V, 2105 IntegerType *Ty, uint64_t Offset, 2106 const Twine &Name) { 2107 LLVM_DEBUG(dbgs() << " start: " << *V << "\n"); 2108 IntegerType *IntTy = cast<IntegerType>(V->getType()); 2109 assert(DL.getTypeStoreSize(Ty).getFixedSize() + Offset <= 2110 DL.getTypeStoreSize(IntTy).getFixedSize() && 2111 "Element extends past full value"); 2112 uint64_t ShAmt = 8 * Offset; 2113 if (DL.isBigEndian()) 2114 ShAmt = 8 * (DL.getTypeStoreSize(IntTy).getFixedSize() - 2115 DL.getTypeStoreSize(Ty).getFixedSize() - Offset); 2116 if (ShAmt) { 2117 V = IRB.CreateLShr(V, ShAmt, Name + ".shift"); 2118 LLVM_DEBUG(dbgs() << " shifted: " << *V << "\n"); 2119 } 2120 assert(Ty->getBitWidth() <= IntTy->getBitWidth() && 2121 "Cannot extract to a larger integer!"); 2122 if (Ty != IntTy) { 2123 V = IRB.CreateTrunc(V, Ty, Name + ".trunc"); 2124 LLVM_DEBUG(dbgs() << " trunced: " << *V << "\n"); 2125 } 2126 return V; 2127 } 2128 2129 static Value *insertInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *Old, 2130 Value *V, uint64_t Offset, const Twine &Name) { 2131 IntegerType *IntTy = cast<IntegerType>(Old->getType()); 2132 IntegerType *Ty = cast<IntegerType>(V->getType()); 2133 assert(Ty->getBitWidth() <= IntTy->getBitWidth() && 2134 "Cannot insert a larger integer!"); 2135 LLVM_DEBUG(dbgs() << " start: " << *V << "\n"); 2136 if (Ty != IntTy) { 2137 V = IRB.CreateZExt(V, IntTy, Name + ".ext"); 2138 LLVM_DEBUG(dbgs() << " extended: " << *V << "\n"); 2139 } 2140 assert(DL.getTypeStoreSize(Ty).getFixedSize() + Offset <= 2141 DL.getTypeStoreSize(IntTy).getFixedSize() && 2142 "Element store outside of alloca store"); 2143 uint64_t ShAmt = 8 * Offset; 2144 if (DL.isBigEndian()) 2145 ShAmt = 8 * (DL.getTypeStoreSize(IntTy).getFixedSize() - 2146 DL.getTypeStoreSize(Ty).getFixedSize() - Offset); 2147 if (ShAmt) { 2148 V = IRB.CreateShl(V, ShAmt, Name + ".shift"); 2149 LLVM_DEBUG(dbgs() << " shifted: " << *V << "\n"); 2150 } 2151 2152 if (ShAmt || Ty->getBitWidth() < IntTy->getBitWidth()) { 2153 APInt Mask = ~Ty->getMask().zext(IntTy->getBitWidth()).shl(ShAmt); 2154 Old = IRB.CreateAnd(Old, Mask, Name + ".mask"); 2155 LLVM_DEBUG(dbgs() << " masked: " << *Old << "\n"); 2156 V = IRB.CreateOr(Old, V, Name + ".insert"); 2157 LLVM_DEBUG(dbgs() << " inserted: " << *V << "\n"); 2158 } 2159 return V; 2160 } 2161 2162 static Value *extractVector(IRBuilderTy &IRB, Value *V, unsigned BeginIndex, 2163 unsigned EndIndex, const Twine &Name) { 2164 auto *VecTy = cast<FixedVectorType>(V->getType()); 2165 unsigned NumElements = EndIndex - BeginIndex; 2166 assert(NumElements <= VecTy->getNumElements() && "Too many elements!"); 2167 2168 if (NumElements == VecTy->getNumElements()) 2169 return V; 2170 2171 if (NumElements == 1) { 2172 V = IRB.CreateExtractElement(V, IRB.getInt32(BeginIndex), 2173 Name + ".extract"); 2174 LLVM_DEBUG(dbgs() << " extract: " << *V << "\n"); 2175 return V; 2176 } 2177 2178 auto Mask = llvm::to_vector<8>(llvm::seq<int>(BeginIndex, EndIndex)); 2179 V = IRB.CreateShuffleVector(V, Mask, Name + ".extract"); 2180 
LLVM_DEBUG(dbgs() << " shuffle: " << *V << "\n");
2181   return V;
2182 }
2183
2184 static Value *insertVector(IRBuilderTy &IRB, Value *Old, Value *V,
2185                            unsigned BeginIndex, const Twine &Name) {
2186   VectorType *VecTy = cast<VectorType>(Old->getType());
2187   assert(VecTy && "Can only insert a vector into a vector");
2188
2189   VectorType *Ty = dyn_cast<VectorType>(V->getType());
2190   if (!Ty) {
2191     // Single element to insert.
2192     V = IRB.CreateInsertElement(Old, V, IRB.getInt32(BeginIndex),
2193                                 Name + ".insert");
2194     LLVM_DEBUG(dbgs() << " insert: " << *V << "\n");
2195     return V;
2196   }
2197
2198   assert(cast<FixedVectorType>(Ty)->getNumElements() <=
2199              cast<FixedVectorType>(VecTy)->getNumElements() &&
2200          "Too many elements!");
2201   if (cast<FixedVectorType>(Ty)->getNumElements() ==
2202       cast<FixedVectorType>(VecTy)->getNumElements()) {
2203     assert(V->getType() == VecTy && "Vector type mismatch");
2204     return V;
2205   }
2206   unsigned EndIndex = BeginIndex + cast<FixedVectorType>(Ty)->getNumElements();
2207
2208   // When inserting a smaller vector into a larger one to store, we first
2209   // use a shuffle vector to widen it with undef elements, and then
2210   // a second shuffle vector to select between the loaded vector and the
2211   // incoming vector.
2212   SmallVector<int, 8> Mask;
2213   Mask.reserve(cast<FixedVectorType>(VecTy)->getNumElements());
2214   for (unsigned i = 0; i != cast<FixedVectorType>(VecTy)->getNumElements(); ++i)
2215     if (i >= BeginIndex && i < EndIndex)
2216       Mask.push_back(i - BeginIndex);
2217     else
2218       Mask.push_back(-1);
2219   V = IRB.CreateShuffleVector(V, Mask, Name + ".expand");
2220   LLVM_DEBUG(dbgs() << " shuffle: " << *V << "\n");
2221
2222   SmallVector<Constant *, 8> Mask2;
2223   Mask2.reserve(cast<FixedVectorType>(VecTy)->getNumElements());
2224   for (unsigned i = 0; i != cast<FixedVectorType>(VecTy)->getNumElements(); ++i)
2225     Mask2.push_back(IRB.getInt1(i >= BeginIndex && i < EndIndex));
2226
2227   V = IRB.CreateSelect(ConstantVector::get(Mask2), V, Old, Name + "blend");
2228
2229   LLVM_DEBUG(dbgs() << " blend: " << *V << "\n");
2230   return V;
2231 }
2232
2233 /// Visitor to rewrite instructions using a particular slice of an alloca
2234 /// to use a new alloca.
2235 ///
2236 /// Also implements the rewriting to vector-based accesses when the partition
2237 /// passes the isVectorPromotionViable predicate. Most of the rewriting logic
2238 /// lives here.
2239 class llvm::sroa::AllocaSliceRewriter
2240     : public InstVisitor<AllocaSliceRewriter, bool> {
2241   // Befriend the base class so it can delegate to private visit methods.
2242   friend class InstVisitor<AllocaSliceRewriter, bool>;
2243
2244   using Base = InstVisitor<AllocaSliceRewriter, bool>;
2245
2246   const DataLayout &DL;
2247   AllocaSlices &AS;
2248   SROAPass &Pass;
2249   AllocaInst &OldAI, &NewAI;
2250   const uint64_t NewAllocaBeginOffset, NewAllocaEndOffset;
2251   Type *NewAllocaTy;
2252
2253   // This is a convenience and flag variable that will be null unless the new
2254   // alloca's integer operations should be widened to this integer type due to
2255   // passing isIntegerWideningViable above. If it is non-null, the desired
2256   // integer type will be stored here for easy access during rewriting.
2257   IntegerType *IntTy;
2258
2259   // If we are rewriting an alloca partition which can be written as pure
2260   // vector operations, we stash extra information here.
When VecTy is 2261 // non-null, we have some strict guarantees about the rewritten alloca: 2262 // - The new alloca is exactly the size of the vector type here. 2263 // - The accesses all either map to the entire vector or to a single 2264 // element. 2265 // - The set of accessing instructions is only one of those handled above 2266 // in isVectorPromotionViable. Generally these are the same access kinds 2267 // which are promotable via mem2reg. 2268 VectorType *VecTy; 2269 Type *ElementTy; 2270 uint64_t ElementSize; 2271 2272 // The original offset of the slice currently being rewritten relative to 2273 // the original alloca. 2274 uint64_t BeginOffset = 0; 2275 uint64_t EndOffset = 0; 2276 2277 // The new offsets of the slice currently being rewritten relative to the 2278 // original alloca. 2279 uint64_t NewBeginOffset = 0, NewEndOffset = 0; 2280 2281 uint64_t SliceSize = 0; 2282 bool IsSplittable = false; 2283 bool IsSplit = false; 2284 Use *OldUse = nullptr; 2285 Instruction *OldPtr = nullptr; 2286 2287 // Track post-rewrite users which are PHI nodes and Selects. 2288 SmallSetVector<PHINode *, 8> &PHIUsers; 2289 SmallSetVector<SelectInst *, 8> &SelectUsers; 2290 2291 // Utility IR builder, whose name prefix is setup for each visited use, and 2292 // the insertion point is set to point to the user. 2293 IRBuilderTy IRB; 2294 2295 public: 2296 AllocaSliceRewriter(const DataLayout &DL, AllocaSlices &AS, SROAPass &Pass, 2297 AllocaInst &OldAI, AllocaInst &NewAI, 2298 uint64_t NewAllocaBeginOffset, 2299 uint64_t NewAllocaEndOffset, bool IsIntegerPromotable, 2300 VectorType *PromotableVecTy, 2301 SmallSetVector<PHINode *, 8> &PHIUsers, 2302 SmallSetVector<SelectInst *, 8> &SelectUsers) 2303 : DL(DL), AS(AS), Pass(Pass), OldAI(OldAI), NewAI(NewAI), 2304 NewAllocaBeginOffset(NewAllocaBeginOffset), 2305 NewAllocaEndOffset(NewAllocaEndOffset), 2306 NewAllocaTy(NewAI.getAllocatedType()), 2307 IntTy( 2308 IsIntegerPromotable 2309 ? Type::getIntNTy(NewAI.getContext(), 2310 DL.getTypeSizeInBits(NewAI.getAllocatedType()) 2311 .getFixedSize()) 2312 : nullptr), 2313 VecTy(PromotableVecTy), 2314 ElementTy(VecTy ? VecTy->getElementType() : nullptr), 2315 ElementSize(VecTy ? DL.getTypeSizeInBits(ElementTy).getFixedSize() / 8 2316 : 0), 2317 PHIUsers(PHIUsers), SelectUsers(SelectUsers), 2318 IRB(NewAI.getContext(), ConstantFolder()) { 2319 if (VecTy) { 2320 assert((DL.getTypeSizeInBits(ElementTy).getFixedSize() % 8) == 0 && 2321 "Only multiple-of-8 sized vector elements are viable"); 2322 ++NumVectorized; 2323 } 2324 assert((!IntTy && !VecTy) || (IntTy && !VecTy) || (!IntTy && VecTy)); 2325 } 2326 2327 bool visit(AllocaSlices::const_iterator I) { 2328 bool CanSROA = true; 2329 BeginOffset = I->beginOffset(); 2330 EndOffset = I->endOffset(); 2331 IsSplittable = I->isSplittable(); 2332 IsSplit = 2333 BeginOffset < NewAllocaBeginOffset || EndOffset > NewAllocaEndOffset; 2334 LLVM_DEBUG(dbgs() << " rewriting " << (IsSplit ? "split " : "")); 2335 LLVM_DEBUG(AS.printSlice(dbgs(), I, "")); 2336 LLVM_DEBUG(dbgs() << "\n"); 2337 2338 // Compute the intersecting offset range. 
2339     assert(BeginOffset < NewAllocaEndOffset);
2340     assert(EndOffset > NewAllocaBeginOffset);
2341     NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
2342     NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
2343
2344     SliceSize = NewEndOffset - NewBeginOffset;
2345
2346     OldUse = I->getUse();
2347     OldPtr = cast<Instruction>(OldUse->get());
2348
2349     Instruction *OldUserI = cast<Instruction>(OldUse->getUser());
2350     IRB.SetInsertPoint(OldUserI);
2351     IRB.SetCurrentDebugLocation(OldUserI->getDebugLoc());
2352     IRB.getInserter().SetNamePrefix(
2353         Twine(NewAI.getName()) + "." + Twine(BeginOffset) + ".");
2354
2355     CanSROA &= visit(cast<Instruction>(OldUse->getUser()));
2356     if (VecTy || IntTy)
2357       assert(CanSROA);
2358     return CanSROA;
2359   }
2360
2361 private:
2362   // Make sure the other visit overloads are visible.
2363   using Base::visit;
2364
2365   // Every instruction which can end up as a user must have a rewrite rule.
2366   bool visitInstruction(Instruction &I) {
2367     LLVM_DEBUG(dbgs() << " !!!! Cannot rewrite: " << I << "\n");
2368     llvm_unreachable("No rewrite rule for this instruction!");
2369   }
2370
2371   Value *getNewAllocaSlicePtr(IRBuilderTy &IRB, Type *PointerTy) {
2372     // Note that the offset computation can use BeginOffset or NewBeginOffset
2373     // interchangeably for unsplit slices.
2374     assert(IsSplit || BeginOffset == NewBeginOffset);
2375     uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
2376
2377 #ifndef NDEBUG
2378     StringRef OldName = OldPtr->getName();
2379     // Skip through the last '.sroa.' component of the name.
2380     size_t LastSROAPrefix = OldName.rfind(".sroa.");
2381     if (LastSROAPrefix != StringRef::npos) {
2382       OldName = OldName.substr(LastSROAPrefix + strlen(".sroa."));
2383       // Look for an SROA slice index.
2384       size_t IndexEnd = OldName.find_first_not_of("0123456789");
2385       if (IndexEnd != StringRef::npos && OldName[IndexEnd] == '.') {
2386         // Strip the index and look for the offset.
2387         OldName = OldName.substr(IndexEnd + 1);
2388         size_t OffsetEnd = OldName.find_first_not_of("0123456789");
2389         if (OffsetEnd != StringRef::npos && OldName[OffsetEnd] == '.')
2390           // Strip the offset.
2391           OldName = OldName.substr(OffsetEnd + 1);
2392       }
2393     }
2394     // Strip any SROA suffixes as well.
2395     OldName = OldName.substr(0, OldName.find(".sroa_"));
2396 #endif
2397
2398     return getAdjustedPtr(IRB, DL, &NewAI,
2399                           APInt(DL.getIndexTypeSizeInBits(PointerTy), Offset),
2400                           PointerTy,
2401 #ifndef NDEBUG
2402                           Twine(OldName) + "."
2403 #else
2404                           Twine()
2405 #endif
2406     );
2407   }
2408
2409   /// Compute suitable alignment to access this slice of the *new*
2410   /// alloca.
2411   ///
2412   /// This is the new alloca's alignment clamped to what the slice's byte
2413   /// offset within it still guarantees.
2414 Align getSliceAlign() { 2415 return commonAlignment(NewAI.getAlign(), 2416 NewBeginOffset - NewAllocaBeginOffset); 2417 } 2418 2419 unsigned getIndex(uint64_t Offset) { 2420 assert(VecTy && "Can only call getIndex when rewriting a vector"); 2421 uint64_t RelOffset = Offset - NewAllocaBeginOffset; 2422 assert(RelOffset / ElementSize < UINT32_MAX && "Index out of bounds"); 2423 uint32_t Index = RelOffset / ElementSize; 2424 assert(Index * ElementSize == RelOffset); 2425 return Index; 2426 } 2427 2428 void deleteIfTriviallyDead(Value *V) { 2429 Instruction *I = cast<Instruction>(V); 2430 if (isInstructionTriviallyDead(I)) 2431 Pass.DeadInsts.push_back(I); 2432 } 2433 2434 Value *rewriteVectorizedLoadInst(LoadInst &LI) { 2435 unsigned BeginIndex = getIndex(NewBeginOffset); 2436 unsigned EndIndex = getIndex(NewEndOffset); 2437 assert(EndIndex > BeginIndex && "Empty vector!"); 2438 2439 LoadInst *Load = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, 2440 NewAI.getAlign(), "load"); 2441 2442 Load->copyMetadata(LI, {LLVMContext::MD_mem_parallel_loop_access, 2443 LLVMContext::MD_access_group}); 2444 return extractVector(IRB, Load, BeginIndex, EndIndex, "vec"); 2445 } 2446 2447 Value *rewriteIntegerLoad(LoadInst &LI) { 2448 assert(IntTy && "We cannot insert an integer to the alloca"); 2449 assert(!LI.isVolatile()); 2450 Value *V = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, 2451 NewAI.getAlign(), "load"); 2452 V = convertValue(DL, IRB, V, IntTy); 2453 assert(NewBeginOffset >= NewAllocaBeginOffset && "Out of bounds offset"); 2454 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; 2455 if (Offset > 0 || NewEndOffset < NewAllocaEndOffset) { 2456 IntegerType *ExtractTy = Type::getIntNTy(LI.getContext(), SliceSize * 8); 2457 V = extractInteger(DL, IRB, V, ExtractTy, Offset, "extract"); 2458 } 2459 // It is possible that the extracted type is not the load type. This 2460 // happens if there is a load past the end of the alloca, and as 2461 // a consequence the slice is narrower but still a candidate for integer 2462 // lowering. To handle this case, we just zero extend the extracted 2463 // integer. 2464 assert(cast<IntegerType>(LI.getType())->getBitWidth() >= SliceSize * 8 && 2465 "Can only handle an extract for an overly wide load"); 2466 if (cast<IntegerType>(LI.getType())->getBitWidth() > SliceSize * 8) 2467 V = IRB.CreateZExt(V, LI.getType()); 2468 return V; 2469 } 2470 2471 bool visitLoadInst(LoadInst &LI) { 2472 LLVM_DEBUG(dbgs() << " original: " << LI << "\n"); 2473 Value *OldOp = LI.getOperand(0); 2474 assert(OldOp == OldPtr); 2475 2476 AAMDNodes AATags = LI.getAAMetadata(); 2477 2478 unsigned AS = LI.getPointerAddressSpace(); 2479 2480 Type *TargetTy = IsSplit ? 
Type::getIntNTy(LI.getContext(), SliceSize * 8)
2481                              : LI.getType();
2482     const bool IsLoadPastEnd =
2483         DL.getTypeStoreSize(TargetTy).getFixedSize() > SliceSize;
2484     bool IsPtrAdjusted = false;
2485     Value *V;
2486     if (VecTy) {
2487       V = rewriteVectorizedLoadInst(LI);
2488     } else if (IntTy && LI.getType()->isIntegerTy()) {
2489       V = rewriteIntegerLoad(LI);
2490     } else if (NewBeginOffset == NewAllocaBeginOffset &&
2491                NewEndOffset == NewAllocaEndOffset &&
2492                (canConvertValue(DL, NewAllocaTy, TargetTy) ||
2493                 (IsLoadPastEnd && NewAllocaTy->isIntegerTy() &&
2494                  TargetTy->isIntegerTy()))) {
2495       LoadInst *NewLI = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
2496                                               NewAI.getAlign(), LI.isVolatile(),
2497                                               LI.getName());
2498       if (AATags)
2499         NewLI->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset));
2500       if (LI.isVolatile())
2501         NewLI->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
2502       if (NewLI->isAtomic())
2503         NewLI->setAlignment(LI.getAlign());
2504
2505       // Any !nonnull metadata or !range metadata on the old load is also valid
2506       // on the new load. This is true in some cases even when the loads
2507       // are different types, for example by mapping !nonnull metadata to
2508       // !range metadata by modeling the null pointer constant converted to the
2509       // integer type.
2510       // FIXME: Add support for range metadata here. Currently the utilities
2511       // for this don't propagate range metadata in trivial cases from one
2512       // integer load to another, don't handle non-addrspace-0 null pointers
2513       // correctly, and don't have any support for mapping ranges as the
2514       // integer type becomes wider or narrower.
2515       if (MDNode *N = LI.getMetadata(LLVMContext::MD_nonnull))
2516         copyNonnullMetadata(LI, N, *NewLI);
2517
2518       // Try to preserve nonnull metadata
2519       V = NewLI;
2520
2521       // If this is an integer load past the end of the slice (which means the
2522       // bytes outside the slice are undef or this load is dead) just forcibly
2523       // fix the integer size with correct handling of endianness.
2524       if (auto *AITy = dyn_cast<IntegerType>(NewAllocaTy))
2525         if (auto *TITy = dyn_cast<IntegerType>(TargetTy))
2526           if (AITy->getBitWidth() < TITy->getBitWidth()) {
2527             V = IRB.CreateZExt(V, TITy, "load.ext");
2528             if (DL.isBigEndian())
2529               V = IRB.CreateShl(V, TITy->getBitWidth() - AITy->getBitWidth(),
2530                                 "endian_shift");
2531           }
2532     } else {
2533       Type *LTy = TargetTy->getPointerTo(AS);
2534       LoadInst *NewLI =
2535           IRB.CreateAlignedLoad(TargetTy, getNewAllocaSlicePtr(IRB, LTy),
2536                                 getSliceAlign(), LI.isVolatile(), LI.getName());
2537       if (AATags)
2538         NewLI->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset));
2539       if (LI.isVolatile())
2540         NewLI->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
2541       NewLI->copyMetadata(LI, {LLVMContext::MD_mem_parallel_loop_access,
2542                                LLVMContext::MD_access_group});
2543
2544       V = NewLI;
2545       IsPtrAdjusted = true;
2546     }
2547     V = convertValue(DL, IRB, V, TargetTy);
2548
2549     if (IsSplit) {
2550       assert(!LI.isVolatile());
2551       assert(LI.getType()->isIntegerTy() &&
2552              "Only integer type loads and stores are split");
2553       assert(SliceSize < DL.getTypeStoreSize(LI.getType()).getFixedSize() &&
2554              "Split load isn't smaller than original load");
2555       assert(DL.typeSizeEqualsStoreSize(LI.getType()) &&
2556              "Non-byte-multiple bit width");
2557       // Move the insertion point just past the load so that we can refer to it.
2558 IRB.SetInsertPoint(&*std::next(BasicBlock::iterator(&LI))); 2559 // Create a placeholder value with the same type as LI to use as the 2560 // basis for the new value. This allows us to replace the uses of LI with 2561 // the computed value, and then replace the placeholder with LI, leaving 2562 // LI only used for this computation. 2563 Value *Placeholder = new LoadInst( 2564 LI.getType(), PoisonValue::get(LI.getType()->getPointerTo(AS)), "", 2565 false, Align(1)); 2566 V = insertInteger(DL, IRB, Placeholder, V, NewBeginOffset - BeginOffset, 2567 "insert"); 2568 LI.replaceAllUsesWith(V); 2569 Placeholder->replaceAllUsesWith(&LI); 2570 Placeholder->deleteValue(); 2571 } else { 2572 LI.replaceAllUsesWith(V); 2573 } 2574 2575 Pass.DeadInsts.push_back(&LI); 2576 deleteIfTriviallyDead(OldOp); 2577 LLVM_DEBUG(dbgs() << " to: " << *V << "\n"); 2578 return !LI.isVolatile() && !IsPtrAdjusted; 2579 } 2580 2581 bool rewriteVectorizedStoreInst(Value *V, StoreInst &SI, Value *OldOp, 2582 AAMDNodes AATags) { 2583 if (V->getType() != VecTy) { 2584 unsigned BeginIndex = getIndex(NewBeginOffset); 2585 unsigned EndIndex = getIndex(NewEndOffset); 2586 assert(EndIndex > BeginIndex && "Empty vector!"); 2587 unsigned NumElements = EndIndex - BeginIndex; 2588 assert(NumElements <= cast<FixedVectorType>(VecTy)->getNumElements() && 2589 "Too many elements!"); 2590 Type *SliceTy = (NumElements == 1) 2591 ? ElementTy 2592 : FixedVectorType::get(ElementTy, NumElements); 2593 if (V->getType() != SliceTy) 2594 V = convertValue(DL, IRB, V, SliceTy); 2595 2596 // Mix in the existing elements. 2597 Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, 2598 NewAI.getAlign(), "load"); 2599 V = insertVector(IRB, Old, V, BeginIndex, "vec"); 2600 } 2601 StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlign()); 2602 Store->copyMetadata(SI, {LLVMContext::MD_mem_parallel_loop_access, 2603 LLVMContext::MD_access_group}); 2604 if (AATags) 2605 Store->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset)); 2606 Pass.DeadInsts.push_back(&SI); 2607 2608 LLVM_DEBUG(dbgs() << " to: " << *Store << "\n"); 2609 return true; 2610 } 2611 2612 bool rewriteIntegerStore(Value *V, StoreInst &SI, AAMDNodes AATags) { 2613 assert(IntTy && "We cannot extract an integer from the alloca"); 2614 assert(!SI.isVolatile()); 2615 if (DL.getTypeSizeInBits(V->getType()).getFixedSize() != 2616 IntTy->getBitWidth()) { 2617 Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, 2618 NewAI.getAlign(), "oldload"); 2619 Old = convertValue(DL, IRB, Old, IntTy); 2620 assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset"); 2621 uint64_t Offset = BeginOffset - NewAllocaBeginOffset; 2622 V = insertInteger(DL, IRB, Old, SI.getValueOperand(), Offset, "insert"); 2623 } 2624 V = convertValue(DL, IRB, V, NewAllocaTy); 2625 StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlign()); 2626 Store->copyMetadata(SI, {LLVMContext::MD_mem_parallel_loop_access, 2627 LLVMContext::MD_access_group}); 2628 if (AATags) 2629 Store->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset)); 2630 Pass.DeadInsts.push_back(&SI); 2631 LLVM_DEBUG(dbgs() << " to: " << *Store << "\n"); 2632 return true; 2633 } 2634 2635 bool visitStoreInst(StoreInst &SI) { 2636 LLVM_DEBUG(dbgs() << " original: " << SI << "\n"); 2637 Value *OldOp = SI.getOperand(1); 2638 assert(OldOp == OldPtr); 2639 2640 AAMDNodes AATags = SI.getAAMetadata(); 2641 Value *V = SI.getValueOperand(); 2642 2643 // Strip all inbounds GEPs and pointer casts to 
try to dig out any root 2644 // alloca that should be re-examined after promoting this alloca. 2645 if (V->getType()->isPointerTy()) 2646 if (AllocaInst *AI = dyn_cast<AllocaInst>(V->stripInBoundsOffsets())) 2647 Pass.PostPromotionWorklist.insert(AI); 2648 2649 if (SliceSize < DL.getTypeStoreSize(V->getType()).getFixedSize()) { 2650 assert(!SI.isVolatile()); 2651 assert(V->getType()->isIntegerTy() && 2652 "Only integer type loads and stores are split"); 2653 assert(DL.typeSizeEqualsStoreSize(V->getType()) && 2654 "Non-byte-multiple bit width"); 2655 IntegerType *NarrowTy = Type::getIntNTy(SI.getContext(), SliceSize * 8); 2656 V = extractInteger(DL, IRB, V, NarrowTy, NewBeginOffset - BeginOffset, 2657 "extract"); 2658 } 2659 2660 if (VecTy) 2661 return rewriteVectorizedStoreInst(V, SI, OldOp, AATags); 2662 if (IntTy && V->getType()->isIntegerTy()) 2663 return rewriteIntegerStore(V, SI, AATags); 2664 2665 const bool IsStorePastEnd = 2666 DL.getTypeStoreSize(V->getType()).getFixedSize() > SliceSize; 2667 StoreInst *NewSI; 2668 if (NewBeginOffset == NewAllocaBeginOffset && 2669 NewEndOffset == NewAllocaEndOffset && 2670 (canConvertValue(DL, V->getType(), NewAllocaTy) || 2671 (IsStorePastEnd && NewAllocaTy->isIntegerTy() && 2672 V->getType()->isIntegerTy()))) { 2673 // If this is an integer store past the end of slice (and thus the bytes 2674 // past that point are irrelevant or this is unreachable), truncate the 2675 // value prior to storing. 2676 if (auto *VITy = dyn_cast<IntegerType>(V->getType())) 2677 if (auto *AITy = dyn_cast<IntegerType>(NewAllocaTy)) 2678 if (VITy->getBitWidth() > AITy->getBitWidth()) { 2679 if (DL.isBigEndian()) 2680 V = IRB.CreateLShr(V, VITy->getBitWidth() - AITy->getBitWidth(), 2681 "endian_shift"); 2682 V = IRB.CreateTrunc(V, AITy, "load.trunc"); 2683 } 2684 2685 V = convertValue(DL, IRB, V, NewAllocaTy); 2686 NewSI = 2687 IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlign(), SI.isVolatile()); 2688 } else { 2689 unsigned AS = SI.getPointerAddressSpace(); 2690 Value *NewPtr = getNewAllocaSlicePtr(IRB, V->getType()->getPointerTo(AS)); 2691 NewSI = 2692 IRB.CreateAlignedStore(V, NewPtr, getSliceAlign(), SI.isVolatile()); 2693 } 2694 NewSI->copyMetadata(SI, {LLVMContext::MD_mem_parallel_loop_access, 2695 LLVMContext::MD_access_group}); 2696 if (AATags) 2697 NewSI->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset)); 2698 if (SI.isVolatile()) 2699 NewSI->setAtomic(SI.getOrdering(), SI.getSyncScopeID()); 2700 if (NewSI->isAtomic()) 2701 NewSI->setAlignment(SI.getAlign()); 2702 Pass.DeadInsts.push_back(&SI); 2703 deleteIfTriviallyDead(OldOp); 2704 2705 LLVM_DEBUG(dbgs() << " to: " << *NewSI << "\n"); 2706 return NewSI->getPointerOperand() == &NewAI && 2707 NewSI->getValueOperand()->getType() == NewAllocaTy && 2708 !SI.isVolatile(); 2709 } 2710 2711 /// Compute an integer value from splatting an i8 across the given 2712 /// number of bytes. 2713 /// 2714 /// Note that this routine assumes an i8 is a byte. If that isn't true, don't 2715 /// call this routine. 2716 /// FIXME: Heed the advice above. 2717 /// 2718 /// \param V The i8 value to splat. 
2719 /// \param Size The number of bytes in the output (assuming i8 is one byte) 2720 Value *getIntegerSplat(Value *V, unsigned Size) { 2721 assert(Size > 0 && "Expected a positive number of bytes."); 2722 IntegerType *VTy = cast<IntegerType>(V->getType()); 2723 assert(VTy->getBitWidth() == 8 && "Expected an i8 value for the byte"); 2724 if (Size == 1) 2725 return V; 2726 2727 Type *SplatIntTy = Type::getIntNTy(VTy->getContext(), Size * 8); 2728 V = IRB.CreateMul( 2729 IRB.CreateZExt(V, SplatIntTy, "zext"), 2730 ConstantExpr::getUDiv( 2731 Constant::getAllOnesValue(SplatIntTy), 2732 ConstantExpr::getZExt(Constant::getAllOnesValue(V->getType()), 2733 SplatIntTy)), 2734 "isplat"); 2735 return V; 2736 } 2737 2738 /// Compute a vector splat for a given element value. 2739 Value *getVectorSplat(Value *V, unsigned NumElements) { 2740 V = IRB.CreateVectorSplat(NumElements, V, "vsplat"); 2741 LLVM_DEBUG(dbgs() << " splat: " << *V << "\n"); 2742 return V; 2743 } 2744 2745 bool visitMemSetInst(MemSetInst &II) { 2746 LLVM_DEBUG(dbgs() << " original: " << II << "\n"); 2747 assert(II.getRawDest() == OldPtr); 2748 2749 AAMDNodes AATags = II.getAAMetadata(); 2750 2751 // If the memset has a variable size, it cannot be split, just adjust the 2752 // pointer to the new alloca. 2753 if (!isa<ConstantInt>(II.getLength())) { 2754 assert(!IsSplit); 2755 assert(NewBeginOffset == BeginOffset); 2756 II.setDest(getNewAllocaSlicePtr(IRB, OldPtr->getType())); 2757 II.setDestAlignment(getSliceAlign()); 2758 2759 deleteIfTriviallyDead(OldPtr); 2760 return false; 2761 } 2762 2763 // Record this instruction for deletion. 2764 Pass.DeadInsts.push_back(&II); 2765 2766 Type *AllocaTy = NewAI.getAllocatedType(); 2767 Type *ScalarTy = AllocaTy->getScalarType(); 2768 2769 const bool CanContinue = [&]() { 2770 if (VecTy || IntTy) 2771 return true; 2772 if (BeginOffset > NewAllocaBeginOffset || 2773 EndOffset < NewAllocaEndOffset) 2774 return false; 2775 // Length must be in range for FixedVectorType. 2776 auto *C = cast<ConstantInt>(II.getLength()); 2777 const uint64_t Len = C->getLimitedValue(); 2778 if (Len > std::numeric_limits<unsigned>::max()) 2779 return false; 2780 auto *Int8Ty = IntegerType::getInt8Ty(NewAI.getContext()); 2781 auto *SrcTy = FixedVectorType::get(Int8Ty, Len); 2782 return canConvertValue(DL, SrcTy, AllocaTy) && 2783 DL.isLegalInteger(DL.getTypeSizeInBits(ScalarTy).getFixedSize()); 2784 }(); 2785 2786 // If this doesn't map cleanly onto the alloca type, and that type isn't 2787 // a single value type, just emit a memset. 2788 if (!CanContinue) { 2789 Type *SizeTy = II.getLength()->getType(); 2790 Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset); 2791 CallInst *New = IRB.CreateMemSet( 2792 getNewAllocaSlicePtr(IRB, OldPtr->getType()), II.getValue(), Size, 2793 MaybeAlign(getSliceAlign()), II.isVolatile()); 2794 if (AATags) 2795 New->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset)); 2796 LLVM_DEBUG(dbgs() << " to: " << *New << "\n"); 2797 return false; 2798 } 2799 2800 // If we can represent this as a simple value, we have to build the actual 2801 // value to store, which requires expanding the byte present in memset to 2802 // a sensible representation for the alloca type. This is essentially 2803 // splatting the byte to a sufficiently wide integer, splatting it across 2804 // any desired vector width, and bitcasting to the final type. 2805 Value *V; 2806 2807 if (VecTy) { 2808 // If this is a memset of a vectorized alloca, insert it. 
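      // For example (illustrative): a memset of 8 bytes starting 8 bytes into a
      // <4 x i32> alloca splats the byte to an i32, builds a <2 x i32> splat,
      // blends it into elements 2 and 3 of the loaded vector, and stores the
      // whole vector back.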
2809       assert(ElementTy == ScalarTy);
2810
2811       unsigned BeginIndex = getIndex(NewBeginOffset);
2812       unsigned EndIndex = getIndex(NewEndOffset);
2813       assert(EndIndex > BeginIndex && "Empty vector!");
2814       unsigned NumElements = EndIndex - BeginIndex;
2815       assert(NumElements <= cast<FixedVectorType>(VecTy)->getNumElements() &&
2816              "Too many elements!");
2817
2818       Value *Splat = getIntegerSplat(
2819           II.getValue(), DL.getTypeSizeInBits(ElementTy).getFixedSize() / 8);
2820       Splat = convertValue(DL, IRB, Splat, ElementTy);
2821       if (NumElements > 1)
2822         Splat = getVectorSplat(Splat, NumElements);
2823
2824       Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
2825                                          NewAI.getAlign(), "oldload");
2826       V = insertVector(IRB, Old, Splat, BeginIndex, "vec");
2827     } else if (IntTy) {
2828       // If this is a memset on an alloca where we can widen stores, insert the
2829       // set integer.
2830       assert(!II.isVolatile());
2831
2832       uint64_t Size = NewEndOffset - NewBeginOffset;
2833       V = getIntegerSplat(II.getValue(), Size);
2834
2835       if (IntTy && (BeginOffset != NewAllocaBeginOffset ||
2836                     EndOffset != NewAllocaEndOffset)) {
2837         Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
2838                                            NewAI.getAlign(), "oldload");
2839         Old = convertValue(DL, IRB, Old, IntTy);
2840         uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
2841         V = insertInteger(DL, IRB, Old, V, Offset, "insert");
2842       } else {
2843         assert(V->getType() == IntTy &&
2844                "Wrong type for an alloca wide integer!");
2845       }
2846       V = convertValue(DL, IRB, V, AllocaTy);
2847     } else {
2848       // Established these invariants above.
2849       assert(NewBeginOffset == NewAllocaBeginOffset);
2850       assert(NewEndOffset == NewAllocaEndOffset);
2851
2852       V = getIntegerSplat(II.getValue(),
2853                           DL.getTypeSizeInBits(ScalarTy).getFixedSize() / 8);
2854       if (VectorType *AllocaVecTy = dyn_cast<VectorType>(AllocaTy))
2855         V = getVectorSplat(
2856             V, cast<FixedVectorType>(AllocaVecTy)->getNumElements());
2857
2858       V = convertValue(DL, IRB, V, AllocaTy);
2859     }
2860
2861     StoreInst *New =
2862         IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlign(), II.isVolatile());
2863     New->copyMetadata(II, {LLVMContext::MD_mem_parallel_loop_access,
2864                            LLVMContext::MD_access_group});
2865     if (AATags)
2866       New->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset));
2867     LLVM_DEBUG(dbgs() << " to: " << *New << "\n");
2868     return !II.isVolatile();
2869   }
2870
2871   bool visitMemTransferInst(MemTransferInst &II) {
2872     // Rewriting of memory transfer instructions can be a bit tricky. We break
2873     // them into two categories: split intrinsics and unsplit intrinsics.
2874
2875     LLVM_DEBUG(dbgs() << " original: " << II << "\n");
2876
2877     AAMDNodes AATags = II.getAAMetadata();
2878
2879     bool IsDest = &II.getRawDestUse() == OldUse;
2880     assert((IsDest && II.getRawDest() == OldPtr) ||
2881            (!IsDest && II.getRawSource() == OldPtr));
2882
2883     MaybeAlign SliceAlign = getSliceAlign();
2884
2885     // For unsplit intrinsics, we simply modify the source and destination
2886     // pointers in place. This isn't just an optimization, it is a matter of
2887     // correctness. With unsplit intrinsics we may be dealing with transfers
2888     // within a single alloca before SROA ran, or with transfers that have
2889     // a variable length. We may also be dealing with memmove instead of
2890     // memcpy, and so simply updating the pointers is necessary for us to
2891     // update both the source and dest of a single call.
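    // For example (illustrative): a memmove between two ranges of the same
    // original alloca, or a memcpy whose length is only known at run time,
    // stays a single intrinsic call; only its pointer and alignment operands
    // are retargeted at the new alloca slice below.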
2892 if (!IsSplittable) { 2893 Value *AdjustedPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); 2894 if (IsDest) { 2895 II.setDest(AdjustedPtr); 2896 II.setDestAlignment(SliceAlign); 2897 } 2898 else { 2899 II.setSource(AdjustedPtr); 2900 II.setSourceAlignment(SliceAlign); 2901 } 2902 2903 LLVM_DEBUG(dbgs() << " to: " << II << "\n"); 2904 deleteIfTriviallyDead(OldPtr); 2905 return false; 2906 } 2907 // For split transfer intrinsics we have an incredibly useful assurance: 2908 // the source and destination do not reside within the same alloca, and at 2909 // least one of them does not escape. This means that we can replace 2910 // memmove with memcpy, and we don't need to worry about all manner of 2911 // downsides to splitting and transforming the operations. 2912 2913 // If this doesn't map cleanly onto the alloca type, and that type isn't 2914 // a single value type, just emit a memcpy. 2915 bool EmitMemCpy = 2916 !VecTy && !IntTy && 2917 (BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset || 2918 SliceSize != 2919 DL.getTypeStoreSize(NewAI.getAllocatedType()).getFixedSize() || 2920 !NewAI.getAllocatedType()->isSingleValueType()); 2921 2922 // If we're just going to emit a memcpy, the alloca hasn't changed, and the 2923 // size hasn't been shrunk based on analysis of the viable range, this is 2924 // a no-op. 2925 if (EmitMemCpy && &OldAI == &NewAI) { 2926 // Ensure the start lines up. 2927 assert(NewBeginOffset == BeginOffset); 2928 2929 // Rewrite the size as needed. 2930 if (NewEndOffset != EndOffset) 2931 II.setLength(ConstantInt::get(II.getLength()->getType(), 2932 NewEndOffset - NewBeginOffset)); 2933 return false; 2934 } 2935 // Record this instruction for deletion. 2936 Pass.DeadInsts.push_back(&II); 2937 2938 // Strip all inbounds GEPs and pointer casts to try to dig out any root 2939 // alloca that should be re-examined after rewriting this instruction. 2940 Value *OtherPtr = IsDest ? II.getRawSource() : II.getRawDest(); 2941 if (AllocaInst *AI = 2942 dyn_cast<AllocaInst>(OtherPtr->stripInBoundsOffsets())) { 2943 assert(AI != &OldAI && AI != &NewAI && 2944 "Splittable transfers cannot reach the same alloca on both ends."); 2945 Pass.Worklist.insert(AI); 2946 } 2947 2948 Type *OtherPtrTy = OtherPtr->getType(); 2949 unsigned OtherAS = OtherPtrTy->getPointerAddressSpace(); 2950 2951 // Compute the relative offset for the other pointer within the transfer. 2952 unsigned OffsetWidth = DL.getIndexSizeInBits(OtherAS); 2953 APInt OtherOffset(OffsetWidth, NewBeginOffset - BeginOffset); 2954 Align OtherAlign = 2955 (IsDest ? II.getSourceAlign() : II.getDestAlign()).valueOrOne(); 2956 OtherAlign = 2957 commonAlignment(OtherAlign, OtherOffset.zextOrTrunc(64).getZExtValue()); 2958 2959 if (EmitMemCpy) { 2960 // Compute the other pointer, folding as much as possible to produce 2961 // a single, simple GEP in most cases. 
2962 OtherPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy, 2963 OtherPtr->getName() + "."); 2964 2965 Value *OurPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); 2966 Type *SizeTy = II.getLength()->getType(); 2967 Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset); 2968 2969 Value *DestPtr, *SrcPtr; 2970 MaybeAlign DestAlign, SrcAlign; 2971 // Note: IsDest is true iff we're copying into the new alloca slice 2972 if (IsDest) { 2973 DestPtr = OurPtr; 2974 DestAlign = SliceAlign; 2975 SrcPtr = OtherPtr; 2976 SrcAlign = OtherAlign; 2977 } else { 2978 DestPtr = OtherPtr; 2979 DestAlign = OtherAlign; 2980 SrcPtr = OurPtr; 2981 SrcAlign = SliceAlign; 2982 } 2983 CallInst *New = IRB.CreateMemCpy(DestPtr, DestAlign, SrcPtr, SrcAlign, 2984 Size, II.isVolatile()); 2985 if (AATags) 2986 New->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset)); 2987 LLVM_DEBUG(dbgs() << " to: " << *New << "\n"); 2988 return false; 2989 } 2990 2991 bool IsWholeAlloca = NewBeginOffset == NewAllocaBeginOffset && 2992 NewEndOffset == NewAllocaEndOffset; 2993 uint64_t Size = NewEndOffset - NewBeginOffset; 2994 unsigned BeginIndex = VecTy ? getIndex(NewBeginOffset) : 0; 2995 unsigned EndIndex = VecTy ? getIndex(NewEndOffset) : 0; 2996 unsigned NumElements = EndIndex - BeginIndex; 2997 IntegerType *SubIntTy = 2998 IntTy ? Type::getIntNTy(IntTy->getContext(), Size * 8) : nullptr; 2999 3000 // Reset the other pointer type to match the register type we're going to 3001 // use, but using the address space of the original other pointer. 3002 Type *OtherTy; 3003 if (VecTy && !IsWholeAlloca) { 3004 if (NumElements == 1) 3005 OtherTy = VecTy->getElementType(); 3006 else 3007 OtherTy = FixedVectorType::get(VecTy->getElementType(), NumElements); 3008 } else if (IntTy && !IsWholeAlloca) { 3009 OtherTy = SubIntTy; 3010 } else { 3011 OtherTy = NewAllocaTy; 3012 } 3013 OtherPtrTy = OtherTy->getPointerTo(OtherAS); 3014 3015 Value *SrcPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy, 3016 OtherPtr->getName() + "."); 3017 MaybeAlign SrcAlign = OtherAlign; 3018 Value *DstPtr = &NewAI; 3019 MaybeAlign DstAlign = SliceAlign; 3020 if (!IsDest) { 3021 std::swap(SrcPtr, DstPtr); 3022 std::swap(SrcAlign, DstAlign); 3023 } 3024 3025 Value *Src; 3026 if (VecTy && !IsWholeAlloca && !IsDest) { 3027 Src = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, 3028 NewAI.getAlign(), "load"); 3029 Src = extractVector(IRB, Src, BeginIndex, EndIndex, "vec"); 3030 } else if (IntTy && !IsWholeAlloca && !IsDest) { 3031 Src = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, 3032 NewAI.getAlign(), "load"); 3033 Src = convertValue(DL, IRB, Src, IntTy); 3034 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; 3035 Src = extractInteger(DL, IRB, Src, SubIntTy, Offset, "extract"); 3036 } else { 3037 LoadInst *Load = IRB.CreateAlignedLoad(OtherTy, SrcPtr, SrcAlign, 3038 II.isVolatile(), "copyload"); 3039 Load->copyMetadata(II, {LLVMContext::MD_mem_parallel_loop_access, 3040 LLVMContext::MD_access_group}); 3041 if (AATags) 3042 Load->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset)); 3043 Src = Load; 3044 } 3045 3046 if (VecTy && !IsWholeAlloca && IsDest) { 3047 Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, 3048 NewAI.getAlign(), "oldload"); 3049 Src = insertVector(IRB, Old, Src, BeginIndex, "vec"); 3050 } else if (IntTy && !IsWholeAlloca && IsDest) { 3051 Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, 3052 NewAI.getAlign(), "oldload"); 3053 Old 
= convertValue(DL, IRB, Old, IntTy); 3054 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; 3055 Src = insertInteger(DL, IRB, Old, Src, Offset, "insert"); 3056 Src = convertValue(DL, IRB, Src, NewAllocaTy); 3057 } 3058 3059 StoreInst *Store = cast<StoreInst>( 3060 IRB.CreateAlignedStore(Src, DstPtr, DstAlign, II.isVolatile())); 3061 Store->copyMetadata(II, {LLVMContext::MD_mem_parallel_loop_access, 3062 LLVMContext::MD_access_group}); 3063 if (AATags) 3064 Store->setAAMetadata(AATags.shift(NewBeginOffset - BeginOffset)); 3065 LLVM_DEBUG(dbgs() << " to: " << *Store << "\n"); 3066 return !II.isVolatile(); 3067 } 3068 3069 bool visitIntrinsicInst(IntrinsicInst &II) { 3070 assert((II.isLifetimeStartOrEnd() || II.isDroppable()) && 3071 "Unexpected intrinsic!"); 3072 LLVM_DEBUG(dbgs() << " original: " << II << "\n"); 3073 3074 // Record this instruction for deletion. 3075 Pass.DeadInsts.push_back(&II); 3076 3077 if (II.isDroppable()) { 3078 assert(II.getIntrinsicID() == Intrinsic::assume && "Expected assume"); 3079 // TODO For now we forget assumed information, this can be improved. 3080 OldPtr->dropDroppableUsesIn(II); 3081 return true; 3082 } 3083 3084 assert(II.getArgOperand(1) == OldPtr); 3085 // Lifetime intrinsics are only promotable if they cover the whole alloca. 3086 // Therefore, we drop lifetime intrinsics which don't cover the whole 3087 // alloca. 3088 // (In theory, intrinsics which partially cover an alloca could be 3089 // promoted, but PromoteMemToReg doesn't handle that case.) 3090 // FIXME: Check whether the alloca is promotable before dropping the 3091 // lifetime intrinsics? 3092 if (NewBeginOffset != NewAllocaBeginOffset || 3093 NewEndOffset != NewAllocaEndOffset) 3094 return true; 3095 3096 ConstantInt *Size = 3097 ConstantInt::get(cast<IntegerType>(II.getArgOperand(0)->getType()), 3098 NewEndOffset - NewBeginOffset); 3099 // Lifetime intrinsics always expect an i8* so directly get such a pointer 3100 // for the new alloca slice. 3101 Type *PointerTy = IRB.getInt8PtrTy(OldPtr->getType()->getPointerAddressSpace()); 3102 Value *Ptr = getNewAllocaSlicePtr(IRB, PointerTy); 3103 Value *New; 3104 if (II.getIntrinsicID() == Intrinsic::lifetime_start) 3105 New = IRB.CreateLifetimeStart(Ptr, Size); 3106 else 3107 New = IRB.CreateLifetimeEnd(Ptr, Size); 3108 3109 (void)New; 3110 LLVM_DEBUG(dbgs() << " to: " << *New << "\n"); 3111 3112 return true; 3113 } 3114 3115 void fixLoadStoreAlign(Instruction &Root) { 3116 // This algorithm implements the same visitor loop as 3117 // hasUnsafePHIOrSelectUse, and fixes the alignment of each load 3118 // or store found. 
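// Note that "fixing" only ever lowers alignment: for example, a load that
// claimed align 8 on a slice which now only guarantees align 4 is clamped
// down to align 4 by the std::min below.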
3119 SmallPtrSet<Instruction *, 4> Visited; 3120 SmallVector<Instruction *, 4> Uses; 3121 Visited.insert(&Root); 3122 Uses.push_back(&Root); 3123 do { 3124 Instruction *I = Uses.pop_back_val(); 3125 3126 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 3127 LI->setAlignment(std::min(LI->getAlign(), getSliceAlign())); 3128 continue; 3129 } 3130 if (StoreInst *SI = dyn_cast<StoreInst>(I)) { 3131 SI->setAlignment(std::min(SI->getAlign(), getSliceAlign())); 3132 continue; 3133 } 3134 3135 assert(isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I) || 3136 isa<PHINode>(I) || isa<SelectInst>(I) || 3137 isa<GetElementPtrInst>(I)); 3138 for (User *U : I->users()) 3139 if (Visited.insert(cast<Instruction>(U)).second) 3140 Uses.push_back(cast<Instruction>(U)); 3141 } while (!Uses.empty()); 3142 } 3143 3144 bool visitPHINode(PHINode &PN) { 3145 LLVM_DEBUG(dbgs() << " original: " << PN << "\n"); 3146 assert(BeginOffset >= NewAllocaBeginOffset && "PHIs are unsplittable"); 3147 assert(EndOffset <= NewAllocaEndOffset && "PHIs are unsplittable"); 3148 3149 // We would like to compute a new pointer in only one place, but have it be 3150 // as local as possible to the PHI. To do that, we re-use the location of 3151 // the old pointer, which necessarily must be in the right position to 3152 // dominate the PHI. 3153 IRBuilderBase::InsertPointGuard Guard(IRB); 3154 if (isa<PHINode>(OldPtr)) 3155 IRB.SetInsertPoint(&*OldPtr->getParent()->getFirstInsertionPt()); 3156 else 3157 IRB.SetInsertPoint(OldPtr); 3158 IRB.SetCurrentDebugLocation(OldPtr->getDebugLoc()); 3159 3160 Value *NewPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); 3161 // Replace the operands which were using the old pointer. 3162 std::replace(PN.op_begin(), PN.op_end(), cast<Value>(OldPtr), NewPtr); 3163 3164 LLVM_DEBUG(dbgs() << " to: " << PN << "\n"); 3165 deleteIfTriviallyDead(OldPtr); 3166 3167 // Fix the alignment of any loads or stores using this PHI node. 3168 fixLoadStoreAlign(PN); 3169 3170 // PHIs can't be promoted on their own, but often can be speculated. We 3171 // check the speculation outside of the rewriter so that we see the 3172 // fully-rewritten alloca. 3173 PHIUsers.insert(&PN); 3174 return true; 3175 } 3176 3177 bool visitSelectInst(SelectInst &SI) { 3178 LLVM_DEBUG(dbgs() << " original: " << SI << "\n"); 3179 assert((SI.getTrueValue() == OldPtr || SI.getFalseValue() == OldPtr) && 3180 "Pointer isn't an operand!"); 3181 assert(BeginOffset >= NewAllocaBeginOffset && "Selects are unsplittable"); 3182 assert(EndOffset <= NewAllocaEndOffset && "Selects are unsplittable"); 3183 3184 Value *NewPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); 3185 // Replace the operands which were using the old pointer. 3186 if (SI.getOperand(1) == OldPtr) 3187 SI.setOperand(1, NewPtr); 3188 if (SI.getOperand(2) == OldPtr) 3189 SI.setOperand(2, NewPtr); 3190 3191 LLVM_DEBUG(dbgs() << " to: " << SI << "\n"); 3192 deleteIfTriviallyDead(OldPtr); 3193 3194 // Fix the alignment of any loads or stores using this select. 3195 fixLoadStoreAlign(SI); 3196 3197 // Selects can't be promoted on their own, but often can be speculated. We 3198 // check the speculation outside of the rewriter so that we see the 3199 // fully-rewritten alloca. 3200 SelectUsers.insert(&SI); 3201 return true; 3202 } 3203 }; 3204 3205 namespace { 3206 3207 /// Visitor to rewrite aggregate loads and stores as scalar. 
3208 /// 3209 /// This pass aggressively rewrites all aggregate loads and stores on 3210 /// a particular pointer (or any pointer derived from it which we can identify) 3211 /// with scalar loads and stores. 3212 class AggLoadStoreRewriter : public InstVisitor<AggLoadStoreRewriter, bool> { 3213 // Befriend the base class so it can delegate to private visit methods. 3214 friend class InstVisitor<AggLoadStoreRewriter, bool>; 3215 3216 /// Queue of pointer uses to analyze and potentially rewrite. 3217 SmallVector<Use *, 8> Queue; 3218 3219 /// Set to prevent us from cycling with phi nodes and loops. 3220 SmallPtrSet<User *, 8> Visited; 3221 3222 /// The current pointer use being rewritten. This is used to dig up the used 3223 /// value (as opposed to the user). 3224 Use *U = nullptr; 3225 3226 /// Used to calculate offsets, and hence alignment, of subobjects. 3227 const DataLayout &DL; 3228 3229 IRBuilderTy &IRB; 3230 3231 public: 3232 AggLoadStoreRewriter(const DataLayout &DL, IRBuilderTy &IRB) 3233 : DL(DL), IRB(IRB) {} 3234 3235 /// Rewrite loads and stores through a pointer and all pointers derived from 3236 /// it. 3237 bool rewrite(Instruction &I) { 3238 LLVM_DEBUG(dbgs() << " Rewriting FCA loads and stores...\n"); 3239 enqueueUsers(I); 3240 bool Changed = false; 3241 while (!Queue.empty()) { 3242 U = Queue.pop_back_val(); 3243 Changed |= visit(cast<Instruction>(U->getUser())); 3244 } 3245 return Changed; 3246 } 3247 3248 private: 3249 /// Enqueue all the users of the given instruction for further processing. 3250 /// This uses a set to de-duplicate users. 3251 void enqueueUsers(Instruction &I) { 3252 for (Use &U : I.uses()) 3253 if (Visited.insert(U.getUser()).second) 3254 Queue.push_back(&U); 3255 } 3256 3257 // Conservative default is to not rewrite anything. 3258 bool visitInstruction(Instruction &I) { return false; } 3259 3260 /// Generic recursive split emission class. 3261 template <typename Derived> class OpSplitter { 3262 protected: 3263 /// The builder used to form new instructions. 3264 IRBuilderTy &IRB; 3265 3266 /// The indices which to be used with insert- or extractvalue to select the 3267 /// appropriate value within the aggregate. 3268 SmallVector<unsigned, 4> Indices; 3269 3270 /// The indices to a GEP instruction which will move Ptr to the correct slot 3271 /// within the aggregate. 3272 SmallVector<Value *, 4> GEPIndices; 3273 3274 /// The base pointer of the original op, used as a base for GEPing the 3275 /// split operations. 3276 Value *Ptr; 3277 3278 /// The base pointee type being GEPed into. 3279 Type *BaseTy; 3280 3281 /// Known alignment of the base pointer. 3282 Align BaseAlign; 3283 3284 /// To calculate offset of each component so we can correctly deduce 3285 /// alignments. 3286 const DataLayout &DL; 3287 3288 /// Initialize the splitter with an insertion point, Ptr and start with a 3289 /// single zero GEP index. 3290 OpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy, 3291 Align BaseAlign, const DataLayout &DL, IRBuilderTy &IRB) 3292 : IRB(IRB), GEPIndices(1, IRB.getInt32(0)), Ptr(Ptr), BaseTy(BaseTy), 3293 BaseAlign(BaseAlign), DL(DL) { 3294 IRB.SetInsertPoint(InsertionPoint); 3295 } 3296 3297 public: 3298 /// Generic recursive split emission routine. 3299 /// 3300 /// This method recursively splits an aggregate op (load or store) into 3301 /// scalar or vector ops. It splits recursively until it hits a single value 3302 /// and emits that single value operation via the template argument. 
3303 /// 3304 /// The logic of this routine relies on GEPs and insertvalue and 3305 /// extractvalue all operating with the same fundamental index list, merely 3306 /// formatted differently (GEPs need actual values). 3307 /// 3308 /// \param Ty The type being split recursively into smaller ops. 3309 /// \param Agg The aggregate value being built up or stored, depending on 3310 /// whether this is splitting a load or a store respectively. 3311 void emitSplitOps(Type *Ty, Value *&Agg, const Twine &Name) { 3312 if (Ty->isSingleValueType()) { 3313 unsigned Offset = DL.getIndexedOffsetInType(BaseTy, GEPIndices); 3314 return static_cast<Derived *>(this)->emitFunc( 3315 Ty, Agg, commonAlignment(BaseAlign, Offset), Name); 3316 } 3317 3318 if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) { 3319 unsigned OldSize = Indices.size(); 3320 (void)OldSize; 3321 for (unsigned Idx = 0, Size = ATy->getNumElements(); Idx != Size; 3322 ++Idx) { 3323 assert(Indices.size() == OldSize && "Did not return to the old size"); 3324 Indices.push_back(Idx); 3325 GEPIndices.push_back(IRB.getInt32(Idx)); 3326 emitSplitOps(ATy->getElementType(), Agg, Name + "." + Twine(Idx)); 3327 GEPIndices.pop_back(); 3328 Indices.pop_back(); 3329 } 3330 return; 3331 } 3332 3333 if (StructType *STy = dyn_cast<StructType>(Ty)) { 3334 unsigned OldSize = Indices.size(); 3335 (void)OldSize; 3336 for (unsigned Idx = 0, Size = STy->getNumElements(); Idx != Size; 3337 ++Idx) { 3338 assert(Indices.size() == OldSize && "Did not return to the old size"); 3339 Indices.push_back(Idx); 3340 GEPIndices.push_back(IRB.getInt32(Idx)); 3341 emitSplitOps(STy->getElementType(Idx), Agg, Name + "." + Twine(Idx)); 3342 GEPIndices.pop_back(); 3343 Indices.pop_back(); 3344 } 3345 return; 3346 } 3347 3348 llvm_unreachable("Only arrays and structs are aggregate loadable types"); 3349 } 3350 }; 3351 3352 struct LoadOpSplitter : public OpSplitter<LoadOpSplitter> { 3353 AAMDNodes AATags; 3354 3355 LoadOpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy, 3356 AAMDNodes AATags, Align BaseAlign, const DataLayout &DL, 3357 IRBuilderTy &IRB) 3358 : OpSplitter<LoadOpSplitter>(InsertionPoint, Ptr, BaseTy, BaseAlign, DL, 3359 IRB), 3360 AATags(AATags) {} 3361 3362 /// Emit a leaf load of a single value. This is called at the leaves of the 3363 /// recursive emission to actually load values. 3364 void emitFunc(Type *Ty, Value *&Agg, Align Alignment, const Twine &Name) { 3365 assert(Ty->isSingleValueType()); 3366 // Load the single value and insert it using the indices. 3367 Value *GEP = 3368 IRB.CreateInBoundsGEP(BaseTy, Ptr, GEPIndices, Name + ".gep"); 3369 LoadInst *Load = 3370 IRB.CreateAlignedLoad(Ty, GEP, Alignment, Name + ".load"); 3371 3372 APInt Offset( 3373 DL.getIndexSizeInBits(Ptr->getType()->getPointerAddressSpace()), 0); 3374 if (AATags && 3375 GEPOperator::accumulateConstantOffset(BaseTy, GEPIndices, DL, Offset)) 3376 Load->setAAMetadata(AATags.shift(Offset.getZExtValue())); 3377 3378 Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert"); 3379 LLVM_DEBUG(dbgs() << " to: " << *Load << "\n"); 3380 } 3381 }; 3382 3383 bool visitLoadInst(LoadInst &LI) { 3384 assert(LI.getPointerOperand() == *U); 3385 if (!LI.isSimple() || LI.getType()->isSingleValueType()) 3386 return false; 3387 3388 // We have an aggregate being loaded, split it apart. 
3389 LLVM_DEBUG(dbgs() << " original: " << LI << "\n"); 3390 LoadOpSplitter Splitter(&LI, *U, LI.getType(), LI.getAAMetadata(), 3391 getAdjustedAlignment(&LI, 0), DL, IRB); 3392 Value *V = PoisonValue::get(LI.getType()); 3393 Splitter.emitSplitOps(LI.getType(), V, LI.getName() + ".fca"); 3394 Visited.erase(&LI); 3395 LI.replaceAllUsesWith(V); 3396 LI.eraseFromParent(); 3397 return true; 3398 } 3399 3400 struct StoreOpSplitter : public OpSplitter<StoreOpSplitter> { 3401 StoreOpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy, 3402 AAMDNodes AATags, Align BaseAlign, const DataLayout &DL, 3403 IRBuilderTy &IRB) 3404 : OpSplitter<StoreOpSplitter>(InsertionPoint, Ptr, BaseTy, BaseAlign, 3405 DL, IRB), 3406 AATags(AATags) {} 3407 AAMDNodes AATags; 3408 /// Emit a leaf store of a single value. This is called at the leaves of the 3409 /// recursive emission to actually produce stores. 3410 void emitFunc(Type *Ty, Value *&Agg, Align Alignment, const Twine &Name) { 3411 assert(Ty->isSingleValueType()); 3412 // Extract the single value and store it using the indices. 3413 // 3414 // The gep and extractvalue values are factored out of the CreateStore 3415 // call to make the output independent of the argument evaluation order. 3416 Value *ExtractValue = 3417 IRB.CreateExtractValue(Agg, Indices, Name + ".extract"); 3418 Value *InBoundsGEP = 3419 IRB.CreateInBoundsGEP(BaseTy, Ptr, GEPIndices, Name + ".gep"); 3420 StoreInst *Store = 3421 IRB.CreateAlignedStore(ExtractValue, InBoundsGEP, Alignment); 3422 3423 APInt Offset( 3424 DL.getIndexSizeInBits(Ptr->getType()->getPointerAddressSpace()), 0); 3425 if (AATags && 3426 GEPOperator::accumulateConstantOffset(BaseTy, GEPIndices, DL, Offset)) 3427 Store->setAAMetadata(AATags.shift(Offset.getZExtValue())); 3428 3429 LLVM_DEBUG(dbgs() << " to: " << *Store << "\n"); 3430 } 3431 }; 3432 3433 bool visitStoreInst(StoreInst &SI) { 3434 if (!SI.isSimple() || SI.getPointerOperand() != *U) 3435 return false; 3436 Value *V = SI.getValueOperand(); 3437 if (V->getType()->isSingleValueType()) 3438 return false; 3439 3440 // We have an aggregate being stored, split it apart. 
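// For example (illustrative only, with invented names), storing an aggregate
// %agg of type { i32, float } through %ptr becomes, per field, roughly:
//   %agg.fca.0.extract = extractvalue { i32, float } %agg, 0
//   %agg.fca.0.gep = getelementptr inbounds { i32, float }, { i32, float }* %ptr, i32 0, i32 0
//   store i32 %agg.fca.0.extract, i32* %agg.fca.0.gep
// with the original aggregate store deleted afterwards.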
3441 LLVM_DEBUG(dbgs() << " original: " << SI << "\n"); 3442 StoreOpSplitter Splitter(&SI, *U, V->getType(), SI.getAAMetadata(), 3443 getAdjustedAlignment(&SI, 0), DL, IRB); 3444 Splitter.emitSplitOps(V->getType(), V, V->getName() + ".fca"); 3445 Visited.erase(&SI); 3446 SI.eraseFromParent(); 3447 return true; 3448 } 3449 3450 bool visitBitCastInst(BitCastInst &BC) { 3451 enqueueUsers(BC); 3452 return false; 3453 } 3454 3455 bool visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) { 3456 enqueueUsers(ASC); 3457 return false; 3458 } 3459 3460 // Fold gep (select cond, ptr1, ptr2) => select cond, gep(ptr1), gep(ptr2) 3461 bool foldGEPSelect(GetElementPtrInst &GEPI) { 3462 if (!GEPI.hasAllConstantIndices()) 3463 return false; 3464 3465 SelectInst *Sel = cast<SelectInst>(GEPI.getPointerOperand()); 3466 3467 LLVM_DEBUG(dbgs() << " Rewriting gep(select) -> select(gep):" 3468 << "\n original: " << *Sel 3469 << "\n " << GEPI); 3470 3471 IRB.SetInsertPoint(&GEPI); 3472 SmallVector<Value *, 4> Index(GEPI.indices()); 3473 bool IsInBounds = GEPI.isInBounds(); 3474 3475 Type *Ty = GEPI.getSourceElementType(); 3476 Value *True = Sel->getTrueValue(); 3477 Value *NTrue = IRB.CreateGEP(Ty, True, Index, True->getName() + ".sroa.gep", 3478 IsInBounds); 3479 3480 Value *False = Sel->getFalseValue(); 3481 3482 Value *NFalse = IRB.CreateGEP(Ty, False, Index, 3483 False->getName() + ".sroa.gep", IsInBounds); 3484 3485 Value *NSel = IRB.CreateSelect(Sel->getCondition(), NTrue, NFalse, 3486 Sel->getName() + ".sroa.sel"); 3487 Visited.erase(&GEPI); 3488 GEPI.replaceAllUsesWith(NSel); 3489 GEPI.eraseFromParent(); 3490 Instruction *NSelI = cast<Instruction>(NSel); 3491 Visited.insert(NSelI); 3492 enqueueUsers(*NSelI); 3493 3494 LLVM_DEBUG(dbgs() << "\n to: " << *NTrue 3495 << "\n " << *NFalse 3496 << "\n " << *NSel << '\n'); 3497 3498 return true; 3499 } 3500 3501 // Fold gep (phi ptr1, ptr2) => phi gep(ptr1), gep(ptr2) 3502 bool foldGEPPhi(GetElementPtrInst &GEPI) { 3503 if (!GEPI.hasAllConstantIndices()) 3504 return false; 3505 3506 PHINode *PHI = cast<PHINode>(GEPI.getPointerOperand()); 3507 if (GEPI.getParent() != PHI->getParent() || 3508 llvm::any_of(PHI->incoming_values(), [](Value *In) 3509 { Instruction *I = dyn_cast<Instruction>(In); 3510 return !I || isa<GetElementPtrInst>(I) || isa<PHINode>(I) || 3511 succ_empty(I->getParent()) || 3512 !I->getParent()->isLegalToHoistInto(); 3513 })) 3514 return false; 3515 3516 LLVM_DEBUG(dbgs() << " Rewriting gep(phi) -> phi(gep):" 3517 << "\n original: " << *PHI 3518 << "\n " << GEPI 3519 << "\n to: "); 3520 3521 SmallVector<Value *, 4> Index(GEPI.indices()); 3522 bool IsInBounds = GEPI.isInBounds(); 3523 IRB.SetInsertPoint(GEPI.getParent()->getFirstNonPHI()); 3524 PHINode *NewPN = IRB.CreatePHI(GEPI.getType(), PHI->getNumIncomingValues(), 3525 PHI->getName() + ".sroa.phi"); 3526 for (unsigned I = 0, E = PHI->getNumIncomingValues(); I != E; ++I) { 3527 BasicBlock *B = PHI->getIncomingBlock(I); 3528 Value *NewVal = nullptr; 3529 int Idx = NewPN->getBasicBlockIndex(B); 3530 if (Idx >= 0) { 3531 NewVal = NewPN->getIncomingValue(Idx); 3532 } else { 3533 Instruction *In = cast<Instruction>(PHI->getIncomingValue(I)); 3534 3535 IRB.SetInsertPoint(In->getParent(), std::next(In->getIterator())); 3536 Type *Ty = GEPI.getSourceElementType(); 3537 NewVal = IRB.CreateGEP(Ty, In, Index, In->getName() + ".sroa.gep", 3538 IsInBounds); 3539 } 3540 NewPN->addIncoming(NewVal, B); 3541 } 3542 3543 Visited.erase(&GEPI); 3544 GEPI.replaceAllUsesWith(NewPN); 3545 GEPI.eraseFromParent(); 3546 
Visited.insert(NewPN); 3547 enqueueUsers(*NewPN); 3548 3549 LLVM_DEBUG(for (Value *In : NewPN->incoming_values()) 3550 dbgs() << "\n " << *In; 3551 dbgs() << "\n " << *NewPN << '\n'); 3552 3553 return true; 3554 } 3555 3556 bool visitGetElementPtrInst(GetElementPtrInst &GEPI) { 3557 if (isa<SelectInst>(GEPI.getPointerOperand()) && 3558 foldGEPSelect(GEPI)) 3559 return true; 3560 3561 if (isa<PHINode>(GEPI.getPointerOperand()) && 3562 foldGEPPhi(GEPI)) 3563 return true; 3564 3565 enqueueUsers(GEPI); 3566 return false; 3567 } 3568 3569 bool visitPHINode(PHINode &PN) { 3570 enqueueUsers(PN); 3571 return false; 3572 } 3573 3574 bool visitSelectInst(SelectInst &SI) { 3575 enqueueUsers(SI); 3576 return false; 3577 } 3578 }; 3579 3580 } // end anonymous namespace 3581 3582 /// Strip aggregate type wrapping. 3583 /// 3584 /// This removes no-op aggregate types wrapping an underlying type. It will 3585 /// strip as many layers of types as it can without changing either the type 3586 /// size or the allocated size. 3587 static Type *stripAggregateTypeWrapping(const DataLayout &DL, Type *Ty) { 3588 if (Ty->isSingleValueType()) 3589 return Ty; 3590 3591 uint64_t AllocSize = DL.getTypeAllocSize(Ty).getFixedSize(); 3592 uint64_t TypeSize = DL.getTypeSizeInBits(Ty).getFixedSize(); 3593 3594 Type *InnerTy; 3595 if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) { 3596 InnerTy = ArrTy->getElementType(); 3597 } else if (StructType *STy = dyn_cast<StructType>(Ty)) { 3598 const StructLayout *SL = DL.getStructLayout(STy); 3599 unsigned Index = SL->getElementContainingOffset(0); 3600 InnerTy = STy->getElementType(Index); 3601 } else { 3602 return Ty; 3603 } 3604 3605 if (AllocSize > DL.getTypeAllocSize(InnerTy).getFixedSize() || 3606 TypeSize > DL.getTypeSizeInBits(InnerTy).getFixedSize()) 3607 return Ty; 3608 3609 return stripAggregateTypeWrapping(DL, InnerTy); 3610 } 3611 3612 /// Try to find a partition of the aggregate type passed in for a given 3613 /// offset and size. 3614 /// 3615 /// This recurses through the aggregate type and tries to compute a subtype 3616 /// based on the offset and size. When the offset and size span a sub-section 3617 /// of an array, it will even compute a new array type for that sub-section, 3618 /// and the same for structs. 3619 /// 3620 /// Note that this routine is very strict and tries to find a partition of the 3621 /// type which produces the *exact* right offset and size. It is not forgiving 3622 /// when the size or offset cause either end of type-based partition to be off. 3623 /// Also, this is a best-effort routine. It is reasonable to give up and not 3624 /// return a type if necessary. 3625 static Type *getTypePartition(const DataLayout &DL, Type *Ty, uint64_t Offset, 3626 uint64_t Size) { 3627 if (Offset == 0 && DL.getTypeAllocSize(Ty).getFixedSize() == Size) 3628 return stripAggregateTypeWrapping(DL, Ty); 3629 if (Offset > DL.getTypeAllocSize(Ty).getFixedSize() || 3630 (DL.getTypeAllocSize(Ty).getFixedSize() - Offset) < Size) 3631 return nullptr; 3632 3633 if (isa<ArrayType>(Ty) || isa<VectorType>(Ty)) { 3634 Type *ElementTy; 3635 uint64_t TyNumElements; 3636 if (auto *AT = dyn_cast<ArrayType>(Ty)) { 3637 ElementTy = AT->getElementType(); 3638 TyNumElements = AT->getNumElements(); 3639 } else { 3640 // FIXME: This isn't right for vectors with non-byte-sized or 3641 // non-power-of-two sized elements. 
3642 auto *VT = cast<FixedVectorType>(Ty); 3643 ElementTy = VT->getElementType(); 3644 TyNumElements = VT->getNumElements(); 3645 } 3646 uint64_t ElementSize = DL.getTypeAllocSize(ElementTy).getFixedSize(); 3647 uint64_t NumSkippedElements = Offset / ElementSize; 3648 if (NumSkippedElements >= TyNumElements) 3649 return nullptr; 3650 Offset -= NumSkippedElements * ElementSize; 3651 3652 // First check if we need to recurse. 3653 if (Offset > 0 || Size < ElementSize) { 3654 // Bail if the partition ends in a different array element. 3655 if ((Offset + Size) > ElementSize) 3656 return nullptr; 3657 // Recurse through the element type trying to peel off offset bytes. 3658 return getTypePartition(DL, ElementTy, Offset, Size); 3659 } 3660 assert(Offset == 0); 3661 3662 if (Size == ElementSize) 3663 return stripAggregateTypeWrapping(DL, ElementTy); 3664 assert(Size > ElementSize); 3665 uint64_t NumElements = Size / ElementSize; 3666 if (NumElements * ElementSize != Size) 3667 return nullptr; 3668 return ArrayType::get(ElementTy, NumElements); 3669 } 3670 3671 StructType *STy = dyn_cast<StructType>(Ty); 3672 if (!STy) 3673 return nullptr; 3674 3675 const StructLayout *SL = DL.getStructLayout(STy); 3676 if (Offset >= SL->getSizeInBytes()) 3677 return nullptr; 3678 uint64_t EndOffset = Offset + Size; 3679 if (EndOffset > SL->getSizeInBytes()) 3680 return nullptr; 3681 3682 unsigned Index = SL->getElementContainingOffset(Offset); 3683 Offset -= SL->getElementOffset(Index); 3684 3685 Type *ElementTy = STy->getElementType(Index); 3686 uint64_t ElementSize = DL.getTypeAllocSize(ElementTy).getFixedSize(); 3687 if (Offset >= ElementSize) 3688 return nullptr; // The offset points into alignment padding. 3689 3690 // See if any partition must be contained by the element. 3691 if (Offset > 0 || Size < ElementSize) { 3692 if ((Offset + Size) > ElementSize) 3693 return nullptr; 3694 return getTypePartition(DL, ElementTy, Offset, Size); 3695 } 3696 assert(Offset == 0); 3697 3698 if (Size == ElementSize) 3699 return stripAggregateTypeWrapping(DL, ElementTy); 3700 3701 StructType::element_iterator EI = STy->element_begin() + Index, 3702 EE = STy->element_end(); 3703 if (EndOffset < SL->getSizeInBytes()) { 3704 unsigned EndIndex = SL->getElementContainingOffset(EndOffset); 3705 if (Index == EndIndex) 3706 return nullptr; // Within a single element and its padding. 3707 3708 // Don't try to form "natural" types if the elements don't line up with the 3709 // expected size. 3710 // FIXME: We could potentially recurse down through the last element in the 3711 // sub-struct to find a natural end point. 3712 if (SL->getElementOffset(EndIndex) != EndOffset) 3713 return nullptr; 3714 3715 assert(Index < EndIndex); 3716 EE = STy->element_begin() + EndIndex; 3717 } 3718 3719 // Try to build up a sub-structure. 3720 StructType *SubTy = 3721 StructType::get(STy->getContext(), makeArrayRef(EI, EE), STy->isPacked()); 3722 const StructLayout *SubSL = DL.getStructLayout(SubTy); 3723 if (Size != SubSL->getSizeInBytes()) 3724 return nullptr; // The sub-struct doesn't have quite the size needed. 3725 3726 return SubTy; 3727 } 3728 3729 /// Pre-split loads and stores to simplify rewriting. 3730 /// 3731 /// We want to break up the splittable load+store pairs as much as 3732 /// possible. 
This is important to do as a preprocessing step, as once we 3733 /// start rewriting the accesses to partitions of the alloca we lose the 3734 /// necessary information to correctly split apart paired loads and stores 3735 /// which both point into this alloca. The case to consider is something like 3736 /// the following: 3737 /// 3738 /// %a = alloca [12 x i8] 3739 /// %gep1 = getelementptr [12 x i8]* %a, i32 0, i32 0 3740 /// %gep2 = getelementptr [12 x i8]* %a, i32 0, i32 4 3741 /// %gep3 = getelementptr [12 x i8]* %a, i32 0, i32 8 3742 /// %iptr1 = bitcast i8* %gep1 to i64* 3743 /// %iptr2 = bitcast i8* %gep2 to i64* 3744 /// %fptr1 = bitcast i8* %gep1 to float* 3745 /// %fptr2 = bitcast i8* %gep2 to float* 3746 /// %fptr3 = bitcast i8* %gep3 to float* 3747 /// store float 0.0, float* %fptr1 3748 /// store float 1.0, float* %fptr2 3749 /// %v = load i64* %iptr1 3750 /// store i64 %v, i64* %iptr2 3751 /// %f1 = load float* %fptr2 3752 /// %f2 = load float* %fptr3 3753 /// 3754 /// Here we want to form 3 partitions of the alloca, each 4 bytes large, and 3755 /// promote everything so we recover the 2 SSA values that should have been 3756 /// there all along. 3757 /// 3758 /// \returns true if any changes are made. 3759 bool SROAPass::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) { 3760 LLVM_DEBUG(dbgs() << "Pre-splitting loads and stores\n"); 3761 3762 // Track the loads and stores which are candidates for pre-splitting here, in 3763 // the order they first appear during the partition scan. These give stable 3764 // iteration order and a basis for tracking which loads and stores we 3765 // actually split. 3766 SmallVector<LoadInst *, 4> Loads; 3767 SmallVector<StoreInst *, 4> Stores; 3768 3769 // We need to accumulate the splits required of each load or store where we 3770 // can find them via a direct lookup. This is important to cross-check loads 3771 // and stores against each other. We also track the slice so that we can kill 3772 // all the slices that end up split. 3773 struct SplitOffsets { 3774 Slice *S; 3775 std::vector<uint64_t> Splits; 3776 }; 3777 SmallDenseMap<Instruction *, SplitOffsets, 8> SplitOffsetsMap; 3778 3779 // Track loads out of this alloca which cannot, for any reason, be pre-split. 3780 // This is important as we also cannot pre-split stores of those loads! 3781 // FIXME: This is all pretty gross. It means that we can be more aggressive 3782 // in pre-splitting when the load feeding the store happens to come from 3783 // a separate alloca. Put another way, the effectiveness of SROA would be 3784 // decreased by a frontend which just concatenated all of its local allocas 3785 // into one big flat alloca. But defeating such patterns is exactly the job 3786 // SROA is tasked with! Sadly, to not have this discrepancy we would have 3787 // change store pre-splitting to actually force pre-splitting of the load 3788 // that feeds it *and all stores*. That makes pre-splitting much harder, but 3789 // maybe it would make it more principled? 3790 SmallPtrSet<LoadInst *, 8> UnsplittableLoads; 3791 3792 LLVM_DEBUG(dbgs() << " Searching for candidate loads and stores\n"); 3793 for (auto &P : AS.partitions()) { 3794 for (Slice &S : P) { 3795 Instruction *I = cast<Instruction>(S.getUse()->getUser()); 3796 if (!S.isSplittable() || S.endOffset() <= P.endOffset()) { 3797 // If this is a load we have to track that it can't participate in any 3798 // pre-splitting. 
If this is a store of a load we have to track that 3799 // that load also can't participate in any pre-splitting. 3800 if (auto *LI = dyn_cast<LoadInst>(I)) 3801 UnsplittableLoads.insert(LI); 3802 else if (auto *SI = dyn_cast<StoreInst>(I)) 3803 if (auto *LI = dyn_cast<LoadInst>(SI->getValueOperand())) 3804 UnsplittableLoads.insert(LI); 3805 continue; 3806 } 3807 assert(P.endOffset() > S.beginOffset() && 3808 "Empty or backwards partition!"); 3809 3810 // Determine if this is a pre-splittable slice. 3811 if (auto *LI = dyn_cast<LoadInst>(I)) { 3812 assert(!LI->isVolatile() && "Cannot split volatile loads!"); 3813 3814 // The load must be used exclusively to store into other pointers for 3815 // us to be able to arbitrarily pre-split it. The stores must also be 3816 // simple to avoid changing semantics. 3817 auto IsLoadSimplyStored = [](LoadInst *LI) { 3818 for (User *LU : LI->users()) { 3819 auto *SI = dyn_cast<StoreInst>(LU); 3820 if (!SI || !SI->isSimple()) 3821 return false; 3822 } 3823 return true; 3824 }; 3825 if (!IsLoadSimplyStored(LI)) { 3826 UnsplittableLoads.insert(LI); 3827 continue; 3828 } 3829 3830 Loads.push_back(LI); 3831 } else if (auto *SI = dyn_cast<StoreInst>(I)) { 3832 if (S.getUse() != &SI->getOperandUse(SI->getPointerOperandIndex())) 3833 // Skip stores *of* pointers. FIXME: This shouldn't even be possible! 3834 continue; 3835 auto *StoredLoad = dyn_cast<LoadInst>(SI->getValueOperand()); 3836 if (!StoredLoad || !StoredLoad->isSimple()) 3837 continue; 3838 assert(!SI->isVolatile() && "Cannot split volatile stores!"); 3839 3840 Stores.push_back(SI); 3841 } else { 3842 // Other uses cannot be pre-split. 3843 continue; 3844 } 3845 3846 // Record the initial split. 3847 LLVM_DEBUG(dbgs() << " Candidate: " << *I << "\n"); 3848 auto &Offsets = SplitOffsetsMap[I]; 3849 assert(Offsets.Splits.empty() && 3850 "Should not have splits the first time we see an instruction!"); 3851 Offsets.S = &S; 3852 Offsets.Splits.push_back(P.endOffset() - S.beginOffset()); 3853 } 3854 3855 // Now scan the already split slices, and add a split for any of them which 3856 // we're going to pre-split. 3857 for (Slice *S : P.splitSliceTails()) { 3858 auto SplitOffsetsMapI = 3859 SplitOffsetsMap.find(cast<Instruction>(S->getUse()->getUser())); 3860 if (SplitOffsetsMapI == SplitOffsetsMap.end()) 3861 continue; 3862 auto &Offsets = SplitOffsetsMapI->second; 3863 3864 assert(Offsets.S == S && "Found a mismatched slice!"); 3865 assert(!Offsets.Splits.empty() && 3866 "Cannot have an empty set of splits on the second partition!"); 3867 assert(Offsets.Splits.back() == 3868 P.beginOffset() - Offsets.S->beginOffset() && 3869 "Previous split does not end where this one begins!"); 3870 3871 // Record each split. The last partition's end isn't needed as the size 3872 // of the slice dictates that. 3873 if (S->endOffset() > P.endOffset()) 3874 Offsets.Splits.push_back(P.endOffset() - Offsets.S->beginOffset()); 3875 } 3876 } 3877 3878 // We may have split loads where some of their stores are split stores. For 3879 // such loads and stores, we can only pre-split them if their splits exactly 3880 // match relative to their starting offset. We have to verify this prior to 3881 // any rewriting. 3882 llvm::erase_if(Stores, [&UnsplittableLoads, &SplitOffsetsMap](StoreInst *SI) { 3883 // Lookup the load we are storing in our map of split 3884 // offsets. 3885 auto *LI = cast<LoadInst>(SI->getValueOperand()); 3886 // If it was completely unsplittable, then we're done, 3887 // and this store can't be pre-split. 
3888 if (UnsplittableLoads.count(LI))
3889 return true;
3890
3891 auto LoadOffsetsI = SplitOffsetsMap.find(LI);
3892 if (LoadOffsetsI == SplitOffsetsMap.end())
3893 return false; // Unrelated loads are definitely safe.
3894 auto &LoadOffsets = LoadOffsetsI->second;
3895
3896 // Now lookup the store's offsets.
3897 auto &StoreOffsets = SplitOffsetsMap[SI];
3898
3899 // If the relative offsets of each split in the load and
3900 // store match exactly, then we can split them and we
3901 // don't need to remove them here.
3902 if (LoadOffsets.Splits == StoreOffsets.Splits)
3903 return false;
3904
3905 LLVM_DEBUG(dbgs() << " Mismatched splits for load and store:\n"
3906 << " " << *LI << "\n"
3907 << " " << *SI << "\n");
3908
3909 // We've found a store and load that we need to split
3910 // with mismatched relative splits. Just give up on them
3911 // and remove both instructions from our list of
3912 // candidates.
3913 UnsplittableLoads.insert(LI);
3914 return true;
3915 });
3916 // Now we have to go *back* through all the stores, because a later store may
3917 // have caused an earlier store's load to become unsplittable, and if it is
3918 // unsplittable for the later store, then we can't rely on it being split in
3919 // the earlier store either.
3920 llvm::erase_if(Stores, [&UnsplittableLoads](StoreInst *SI) {
3921 auto *LI = cast<LoadInst>(SI->getValueOperand());
3922 return UnsplittableLoads.count(LI);
3923 });
3924 // Once we've established all the loads that can't be split for some reason,
3925 // filter out any that made it into our candidate list.
3926 llvm::erase_if(Loads, [&UnsplittableLoads](LoadInst *LI) {
3927 return UnsplittableLoads.count(LI);
3928 });
3929
3930 // If no loads or stores are left, there is no pre-splitting to be done for
3931 // this alloca.
3932 if (Loads.empty() && Stores.empty())
3933 return false;
3934
3935 // From here on, we can't fail and will be building new accesses, so rig up
3936 // an IR builder.
3937 IRBuilderTy IRB(&AI);
3938
3939 // Collect the new slices which we will merge into the alloca slices.
3940 SmallVector<Slice, 4> NewSlices;
3941
3942 // Track any allocas we end up splitting loads and stores for so we iterate
3943 // on them.
3944 SmallPtrSet<AllocaInst *, 4> ResplitPromotableAllocas;
3945
3946 // At this point, we have collected all of the loads and stores we can
3947 // pre-split, and the specific splits needed for them. We actually do the
3948 // splitting in a specific order in order to handle the case where one of
3949 // the loads is the value operand of one of the stores.
3950 //
3951 // First, we rewrite all of the split loads, and just accumulate each split
3952 // load in a parallel structure. We also build the slices for them and append
3953 // them to the alloca slices.
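// For example (borrowing offsets from the example in the function comment
// above), an i64 load of bytes [0, 8) with a single split point at offset 4
// is rewritten into two i32 loads covering [0, 4) and [4, 8), and each new
// load is recorded as its own unsplittable slice.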
3954 SmallDenseMap<LoadInst *, std::vector<LoadInst *>, 1> SplitLoadsMap; 3955 std::vector<LoadInst *> SplitLoads; 3956 const DataLayout &DL = AI.getModule()->getDataLayout(); 3957 for (LoadInst *LI : Loads) { 3958 SplitLoads.clear(); 3959 3960 auto &Offsets = SplitOffsetsMap[LI]; 3961 unsigned SliceSize = Offsets.S->endOffset() - Offsets.S->beginOffset(); 3962 assert(LI->getType()->getIntegerBitWidth() % 8 == 0 && 3963 "Load must have type size equal to store size"); 3964 assert(LI->getType()->getIntegerBitWidth() / 8 >= SliceSize && 3965 "Load must be >= slice size"); 3966 3967 uint64_t BaseOffset = Offsets.S->beginOffset(); 3968 assert(BaseOffset + SliceSize > BaseOffset && 3969 "Cannot represent alloca access size using 64-bit integers!"); 3970 3971 Instruction *BasePtr = cast<Instruction>(LI->getPointerOperand()); 3972 IRB.SetInsertPoint(LI); 3973 3974 LLVM_DEBUG(dbgs() << " Splitting load: " << *LI << "\n"); 3975 3976 uint64_t PartOffset = 0, PartSize = Offsets.Splits.front(); 3977 int Idx = 0, Size = Offsets.Splits.size(); 3978 for (;;) { 3979 auto *PartTy = Type::getIntNTy(LI->getContext(), PartSize * 8); 3980 auto AS = LI->getPointerAddressSpace(); 3981 auto *PartPtrTy = PartTy->getPointerTo(AS); 3982 LoadInst *PLoad = IRB.CreateAlignedLoad( 3983 PartTy, 3984 getAdjustedPtr(IRB, DL, BasePtr, 3985 APInt(DL.getIndexSizeInBits(AS), PartOffset), 3986 PartPtrTy, BasePtr->getName() + "."), 3987 getAdjustedAlignment(LI, PartOffset), 3988 /*IsVolatile*/ false, LI->getName()); 3989 PLoad->copyMetadata(*LI, {LLVMContext::MD_mem_parallel_loop_access, 3990 LLVMContext::MD_access_group}); 3991 3992 // Append this load onto the list of split loads so we can find it later 3993 // to rewrite the stores. 3994 SplitLoads.push_back(PLoad); 3995 3996 // Now build a new slice for the alloca. 3997 NewSlices.push_back( 3998 Slice(BaseOffset + PartOffset, BaseOffset + PartOffset + PartSize, 3999 &PLoad->getOperandUse(PLoad->getPointerOperandIndex()), 4000 /*IsSplittable*/ false)); 4001 LLVM_DEBUG(dbgs() << " new slice [" << NewSlices.back().beginOffset() 4002 << ", " << NewSlices.back().endOffset() 4003 << "): " << *PLoad << "\n"); 4004 4005 // See if we've handled all the splits. 4006 if (Idx >= Size) 4007 break; 4008 4009 // Setup the next partition. 4010 PartOffset = Offsets.Splits[Idx]; 4011 ++Idx; 4012 PartSize = (Idx < Size ? Offsets.Splits[Idx] : SliceSize) - PartOffset; 4013 } 4014 4015 // Now that we have the split loads, do the slow walk over all uses of the 4016 // load and rewrite them as split stores, or save the split loads to use 4017 // below if the store is going to be split there anyways. 4018 bool DeferredStores = false; 4019 for (User *LU : LI->users()) { 4020 StoreInst *SI = cast<StoreInst>(LU); 4021 if (!Stores.empty() && SplitOffsetsMap.count(SI)) { 4022 DeferredStores = true; 4023 LLVM_DEBUG(dbgs() << " Deferred splitting of store: " << *SI 4024 << "\n"); 4025 continue; 4026 } 4027 4028 Value *StoreBasePtr = SI->getPointerOperand(); 4029 IRB.SetInsertPoint(SI); 4030 4031 LLVM_DEBUG(dbgs() << " Splitting store of load: " << *SI << "\n"); 4032 4033 for (int Idx = 0, Size = SplitLoads.size(); Idx < Size; ++Idx) { 4034 LoadInst *PLoad = SplitLoads[Idx]; 4035 uint64_t PartOffset = Idx == 0 ? 
0 : Offsets.Splits[Idx - 1]; 4036 auto *PartPtrTy = 4037 PLoad->getType()->getPointerTo(SI->getPointerAddressSpace()); 4038 4039 auto AS = SI->getPointerAddressSpace(); 4040 StoreInst *PStore = IRB.CreateAlignedStore( 4041 PLoad, 4042 getAdjustedPtr(IRB, DL, StoreBasePtr, 4043 APInt(DL.getIndexSizeInBits(AS), PartOffset), 4044 PartPtrTy, StoreBasePtr->getName() + "."), 4045 getAdjustedAlignment(SI, PartOffset), 4046 /*IsVolatile*/ false); 4047 PStore->copyMetadata(*SI, {LLVMContext::MD_mem_parallel_loop_access, 4048 LLVMContext::MD_access_group}); 4049 LLVM_DEBUG(dbgs() << " +" << PartOffset << ":" << *PStore << "\n"); 4050 } 4051 4052 // We want to immediately iterate on any allocas impacted by splitting 4053 // this store, and we have to track any promotable alloca (indicated by 4054 // a direct store) as needing to be resplit because it is no longer 4055 // promotable. 4056 if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(StoreBasePtr)) { 4057 ResplitPromotableAllocas.insert(OtherAI); 4058 Worklist.insert(OtherAI); 4059 } else if (AllocaInst *OtherAI = dyn_cast<AllocaInst>( 4060 StoreBasePtr->stripInBoundsOffsets())) { 4061 Worklist.insert(OtherAI); 4062 } 4063 4064 // Mark the original store as dead. 4065 DeadInsts.push_back(SI); 4066 } 4067 4068 // Save the split loads if there are deferred stores among the users. 4069 if (DeferredStores) 4070 SplitLoadsMap.insert(std::make_pair(LI, std::move(SplitLoads))); 4071 4072 // Mark the original load as dead and kill the original slice. 4073 DeadInsts.push_back(LI); 4074 Offsets.S->kill(); 4075 } 4076 4077 // Second, we rewrite all of the split stores. At this point, we know that 4078 // all loads from this alloca have been split already. For stores of such 4079 // loads, we can simply look up the pre-existing split loads. For stores of 4080 // other loads, we split those loads first and then write split stores of 4081 // them. 4082 for (StoreInst *SI : Stores) { 4083 auto *LI = cast<LoadInst>(SI->getValueOperand()); 4084 IntegerType *Ty = cast<IntegerType>(LI->getType()); 4085 assert(Ty->getBitWidth() % 8 == 0); 4086 uint64_t StoreSize = Ty->getBitWidth() / 8; 4087 assert(StoreSize > 0 && "Cannot have a zero-sized integer store!"); 4088 4089 auto &Offsets = SplitOffsetsMap[SI]; 4090 assert(StoreSize == Offsets.S->endOffset() - Offsets.S->beginOffset() && 4091 "Slice size should always match load size exactly!"); 4092 uint64_t BaseOffset = Offsets.S->beginOffset(); 4093 assert(BaseOffset + StoreSize > BaseOffset && 4094 "Cannot represent alloca access size using 64-bit integers!"); 4095 4096 Value *LoadBasePtr = LI->getPointerOperand(); 4097 Instruction *StoreBasePtr = cast<Instruction>(SI->getPointerOperand()); 4098 4099 LLVM_DEBUG(dbgs() << " Splitting store: " << *SI << "\n"); 4100 4101 // Check whether we have an already split load. 
4102 auto SplitLoadsMapI = SplitLoadsMap.find(LI); 4103 std::vector<LoadInst *> *SplitLoads = nullptr; 4104 if (SplitLoadsMapI != SplitLoadsMap.end()) { 4105 SplitLoads = &SplitLoadsMapI->second; 4106 assert(SplitLoads->size() == Offsets.Splits.size() + 1 && 4107 "Too few split loads for the number of splits in the store!"); 4108 } else { 4109 LLVM_DEBUG(dbgs() << " of load: " << *LI << "\n"); 4110 } 4111 4112 uint64_t PartOffset = 0, PartSize = Offsets.Splits.front(); 4113 int Idx = 0, Size = Offsets.Splits.size(); 4114 for (;;) { 4115 auto *PartTy = Type::getIntNTy(Ty->getContext(), PartSize * 8); 4116 auto *LoadPartPtrTy = PartTy->getPointerTo(LI->getPointerAddressSpace()); 4117 auto *StorePartPtrTy = PartTy->getPointerTo(SI->getPointerAddressSpace()); 4118 4119 // Either lookup a split load or create one. 4120 LoadInst *PLoad; 4121 if (SplitLoads) { 4122 PLoad = (*SplitLoads)[Idx]; 4123 } else { 4124 IRB.SetInsertPoint(LI); 4125 auto AS = LI->getPointerAddressSpace(); 4126 PLoad = IRB.CreateAlignedLoad( 4127 PartTy, 4128 getAdjustedPtr(IRB, DL, LoadBasePtr, 4129 APInt(DL.getIndexSizeInBits(AS), PartOffset), 4130 LoadPartPtrTy, LoadBasePtr->getName() + "."), 4131 getAdjustedAlignment(LI, PartOffset), 4132 /*IsVolatile*/ false, LI->getName()); 4133 PLoad->copyMetadata(*LI, {LLVMContext::MD_mem_parallel_loop_access, 4134 LLVMContext::MD_access_group}); 4135 } 4136 4137 // And store this partition. 4138 IRB.SetInsertPoint(SI); 4139 auto AS = SI->getPointerAddressSpace(); 4140 StoreInst *PStore = IRB.CreateAlignedStore( 4141 PLoad, 4142 getAdjustedPtr(IRB, DL, StoreBasePtr, 4143 APInt(DL.getIndexSizeInBits(AS), PartOffset), 4144 StorePartPtrTy, StoreBasePtr->getName() + "."), 4145 getAdjustedAlignment(SI, PartOffset), 4146 /*IsVolatile*/ false); 4147 PStore->copyMetadata(*SI, {LLVMContext::MD_mem_parallel_loop_access, 4148 LLVMContext::MD_access_group}); 4149 4150 // Now build a new slice for the alloca. 4151 NewSlices.push_back( 4152 Slice(BaseOffset + PartOffset, BaseOffset + PartOffset + PartSize, 4153 &PStore->getOperandUse(PStore->getPointerOperandIndex()), 4154 /*IsSplittable*/ false)); 4155 LLVM_DEBUG(dbgs() << " new slice [" << NewSlices.back().beginOffset() 4156 << ", " << NewSlices.back().endOffset() 4157 << "): " << *PStore << "\n"); 4158 if (!SplitLoads) { 4159 LLVM_DEBUG(dbgs() << " of split load: " << *PLoad << "\n"); 4160 } 4161 4162 // See if we've finished all the splits. 4163 if (Idx >= Size) 4164 break; 4165 4166 // Setup the next partition. 4167 PartOffset = Offsets.Splits[Idx]; 4168 ++Idx; 4169 PartSize = (Idx < Size ? Offsets.Splits[Idx] : StoreSize) - PartOffset; 4170 } 4171 4172 // We want to immediately iterate on any allocas impacted by splitting 4173 // this load, which is only relevant if it isn't a load of this alloca and 4174 // thus we didn't already split the loads above. We also have to keep track 4175 // of any promotable allocas we split loads on as they can no longer be 4176 // promoted. 4177 if (!SplitLoads) { 4178 if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(LoadBasePtr)) { 4179 assert(OtherAI != &AI && "We can't re-split our own alloca!"); 4180 ResplitPromotableAllocas.insert(OtherAI); 4181 Worklist.insert(OtherAI); 4182 } else if (AllocaInst *OtherAI = dyn_cast<AllocaInst>( 4183 LoadBasePtr->stripInBoundsOffsets())) { 4184 assert(OtherAI != &AI && "We can't re-split our own alloca!"); 4185 Worklist.insert(OtherAI); 4186 } 4187 } 4188 4189 // Mark the original store as dead now that we've split it up and kill its 4190 // slice. 
Note that we leave the original load in place unless this store
4191 // was its only use. It may in turn be split up if it is an alloca load
4192 // for some other alloca, but it may be a normal load. This may introduce
4193 // redundant loads, but where those can be merged the rest of the optimizer
4194 // should handle the merging, and this uncovers SSA splits which is more
4195 // important. In practice, the original loads will almost always be fully
4196 // split and removed eventually, and the splits will be merged by any
4197 // trivial CSE, including instcombine.
4198 if (LI->hasOneUse()) {
4199 assert(*LI->user_begin() == SI && "Single use isn't this store!");
4200 DeadInsts.push_back(LI);
4201 }
4202 DeadInsts.push_back(SI);
4203 Offsets.S->kill();
4204 }
4205
4206 // Remove the killed slices that have been pre-split.
4207 llvm::erase_if(AS, [](const Slice &S) { return S.isDead(); });
4208
4209 // Insert our new slices. This will sort and merge them into the sorted
4210 // sequence.
4211 AS.insert(NewSlices);
4212
4213 LLVM_DEBUG(dbgs() << " Pre-split slices:\n");
4214 #ifndef NDEBUG
4215 for (auto I = AS.begin(), E = AS.end(); I != E; ++I)
4216 LLVM_DEBUG(AS.print(dbgs(), I, " "));
4217 #endif
4218
4219 // Finally, don't try to promote any allocas that now require re-splitting.
4220 // They have already been added to the worklist above.
4221 llvm::erase_if(PromotableAllocas, [&](AllocaInst *AI) {
4222 return ResplitPromotableAllocas.count(AI);
4223 });
4224
4225 return true;
4226 }
4227
4228 /// Rewrite an alloca partition's users.
4229 ///
4230 /// This routine drives both of the rewriting goals of the SROA pass. It tries
4231 /// to rewrite uses of an alloca partition to be conducive to SSA value
4232 /// promotion. If the partition needs a new, more refined alloca, this will
4233 /// build that new alloca, preserving as much type information as possible, and
4234 /// rewrite the uses of the old alloca to point at the new one and have the
4235 /// appropriate new offsets. It also evaluates how successful the rewrite was
4236 /// at enabling promotion and, if it was successful, queues the alloca to be
4237 /// promoted.
4238 AllocaInst *SROAPass::rewritePartition(AllocaInst &AI, AllocaSlices &AS,
4239 Partition &P) {
4240 // Try to compute a friendly type for this partition of the alloca. This
4241 // won't always succeed, in which case we fall back to a legal integer type
4242 // or an i8 array of an appropriate size.
4243 Type *SliceTy = nullptr;
4244 const DataLayout &DL = AI.getModule()->getDataLayout();
4245 std::pair<Type *, IntegerType *> CommonUseTy =
4246 findCommonType(P.begin(), P.end(), P.endOffset());
4247 // Do all uses operate on the same type?
4248 if (CommonUseTy.first)
4249 if (DL.getTypeAllocSize(CommonUseTy.first).getFixedSize() >= P.size())
4250 SliceTy = CommonUseTy.first;
4251 // If not, can we find an appropriate subtype in the original allocated type?
4252 if (!SliceTy)
4253 if (Type *TypePartitionTy = getTypePartition(DL, AI.getAllocatedType(),
4254 P.beginOffset(), P.size()))
4255 SliceTy = TypePartitionTy;
4256 // If still not, can we use the largest bitwidth integer type used?
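// (Illustrative: a 4-byte partition with no usable common type and no
// matching subtype of the original alloca ends up typed as i32 on targets
// where i32 is a legal integer type, and as [4 x i8] otherwise.)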
4257 if (!SliceTy && CommonUseTy.second) 4258 if (DL.getTypeAllocSize(CommonUseTy.second).getFixedSize() >= P.size()) 4259 SliceTy = CommonUseTy.second; 4260 if ((!SliceTy || (SliceTy->isArrayTy() && 4261 SliceTy->getArrayElementType()->isIntegerTy())) && 4262 DL.isLegalInteger(P.size() * 8)) 4263 SliceTy = Type::getIntNTy(*C, P.size() * 8); 4264 if (!SliceTy) 4265 SliceTy = ArrayType::get(Type::getInt8Ty(*C), P.size()); 4266 assert(DL.getTypeAllocSize(SliceTy).getFixedSize() >= P.size()); 4267 4268 bool IsIntegerPromotable = isIntegerWideningViable(P, SliceTy, DL); 4269 4270 VectorType *VecTy = 4271 IsIntegerPromotable ? nullptr : isVectorPromotionViable(P, DL); 4272 if (VecTy) 4273 SliceTy = VecTy; 4274 4275 // Check for the case where we're going to rewrite to a new alloca of the 4276 // exact same type as the original, and with the same access offsets. In that 4277 // case, re-use the existing alloca, but still run through the rewriter to 4278 // perform phi and select speculation. 4279 // P.beginOffset() can be non-zero even with the same type in a case with 4280 // out-of-bounds access (e.g. @PR35657 function in SROA/basictest.ll). 4281 AllocaInst *NewAI; 4282 if (SliceTy == AI.getAllocatedType() && P.beginOffset() == 0) { 4283 NewAI = &AI; 4284 // FIXME: We should be able to bail at this point with "nothing changed". 4285 // FIXME: We might want to defer PHI speculation until after here. 4286 // FIXME: return nullptr; 4287 } else { 4288 // Make sure the alignment is compatible with P.beginOffset(). 4289 const Align Alignment = commonAlignment(AI.getAlign(), P.beginOffset()); 4290 // If we will get at least this much alignment from the type alone, leave 4291 // the alloca's alignment unconstrained. 4292 const bool IsUnconstrained = Alignment <= DL.getABITypeAlign(SliceTy); 4293 NewAI = new AllocaInst( 4294 SliceTy, AI.getType()->getAddressSpace(), nullptr, 4295 IsUnconstrained ? DL.getPrefTypeAlign(SliceTy) : Alignment, 4296 AI.getName() + ".sroa." + Twine(P.begin() - AS.begin()), &AI); 4297 // Copy the old AI debug location over to the new one. 4298 NewAI->setDebugLoc(AI.getDebugLoc()); 4299 ++NumNewAllocas; 4300 } 4301 4302 LLVM_DEBUG(dbgs() << "Rewriting alloca partition " 4303 << "[" << P.beginOffset() << "," << P.endOffset() 4304 << ") to: " << *NewAI << "\n"); 4305 4306 // Track the high watermark on the worklist as it is only relevant for 4307 // promoted allocas. We will reset it to this point if the alloca is not in 4308 // fact scheduled for promotion. 4309 unsigned PPWOldSize = PostPromotionWorklist.size(); 4310 unsigned NumUses = 0; 4311 SmallSetVector<PHINode *, 8> PHIUsers; 4312 SmallSetVector<SelectInst *, 8> SelectUsers; 4313 4314 AllocaSliceRewriter Rewriter(DL, AS, *this, AI, *NewAI, P.beginOffset(), 4315 P.endOffset(), IsIntegerPromotable, VecTy, 4316 PHIUsers, SelectUsers); 4317 bool Promotable = true; 4318 for (Slice *S : P.splitSliceTails()) { 4319 Promotable &= Rewriter.visit(S); 4320 ++NumUses; 4321 } 4322 for (Slice &S : P) { 4323 Promotable &= Rewriter.visit(&S); 4324 ++NumUses; 4325 } 4326 4327 NumAllocaPartitionUses += NumUses; 4328 MaxUsesPerAllocaPartition.updateMax(NumUses); 4329 4330 // Now that we've processed all the slices in the new partition, check if any 4331 // PHIs or Selects would block promotion. 
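// For example, a PHI merging pointers into this slice only stays promotable
// if the loads through it can be speculated into its predecessors; if not,
// we clear the speculation candidates and give up on promoting this
// partition.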
4332 for (PHINode *PHI : PHIUsers) 4333 if (!isSafePHIToSpeculate(*PHI)) { 4334 Promotable = false; 4335 PHIUsers.clear(); 4336 SelectUsers.clear(); 4337 break; 4338 } 4339 4340 for (SelectInst *Sel : SelectUsers) 4341 if (!isSafeSelectToSpeculate(*Sel)) { 4342 Promotable = false; 4343 PHIUsers.clear(); 4344 SelectUsers.clear(); 4345 break; 4346 } 4347 4348 if (Promotable) { 4349 for (Use *U : AS.getDeadUsesIfPromotable()) { 4350 auto *OldInst = dyn_cast<Instruction>(U->get()); 4351 Value::dropDroppableUse(*U); 4352 if (OldInst) 4353 if (isInstructionTriviallyDead(OldInst)) 4354 DeadInsts.push_back(OldInst); 4355 } 4356 if (PHIUsers.empty() && SelectUsers.empty()) { 4357 // Promote the alloca. 4358 PromotableAllocas.push_back(NewAI); 4359 } else { 4360 // If we have either PHIs or Selects to speculate, add them to those 4361 // worklists and re-queue the new alloca so that we promote in on the 4362 // next iteration. 4363 for (PHINode *PHIUser : PHIUsers) 4364 SpeculatablePHIs.insert(PHIUser); 4365 for (SelectInst *SelectUser : SelectUsers) 4366 SpeculatableSelects.insert(SelectUser); 4367 Worklist.insert(NewAI); 4368 } 4369 } else { 4370 // Drop any post-promotion work items if promotion didn't happen. 4371 while (PostPromotionWorklist.size() > PPWOldSize) 4372 PostPromotionWorklist.pop_back(); 4373 4374 // We couldn't promote and we didn't create a new partition, nothing 4375 // happened. 4376 if (NewAI == &AI) 4377 return nullptr; 4378 4379 // If we can't promote the alloca, iterate on it to check for new 4380 // refinements exposed by splitting the current alloca. Don't iterate on an 4381 // alloca which didn't actually change and didn't get promoted. 4382 Worklist.insert(NewAI); 4383 } 4384 4385 return NewAI; 4386 } 4387 4388 /// Walks the slices of an alloca and form partitions based on them, 4389 /// rewriting each of their uses. 4390 bool SROAPass::splitAlloca(AllocaInst &AI, AllocaSlices &AS) { 4391 if (AS.begin() == AS.end()) 4392 return false; 4393 4394 unsigned NumPartitions = 0; 4395 bool Changed = false; 4396 const DataLayout &DL = AI.getModule()->getDataLayout(); 4397 4398 // First try to pre-split loads and stores. 4399 Changed |= presplitLoadsAndStores(AI, AS); 4400 4401 // Now that we have identified any pre-splitting opportunities, 4402 // mark loads and stores unsplittable except for the following case. 4403 // We leave a slice splittable if all other slices are disjoint or fully 4404 // included in the slice, such as whole-alloca loads and stores. 4405 // If we fail to split these during pre-splitting, we want to force them 4406 // to be rewritten into a partition. 4407 bool IsSorted = true; 4408 4409 uint64_t AllocaSize = 4410 DL.getTypeAllocSize(AI.getAllocatedType()).getFixedSize(); 4411 const uint64_t MaxBitVectorSize = 1024; 4412 if (AllocaSize <= MaxBitVectorSize) { 4413 // If a byte boundary is included in any load or store, a slice starting or 4414 // ending at the boundary is not splittable. 
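// For example, if some access covers bytes [0, 8), offsets 1 through 7 are
// marked non-splittable below, so a splittable load or store ending at,
// say, offset 4 straddles that access and will be made unsplittable.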
    SmallBitVector SplittableOffset(AllocaSize + 1, true);
    for (Slice &S : AS)
      for (unsigned O = S.beginOffset() + 1;
           O < S.endOffset() && O < AllocaSize; O++)
        SplittableOffset.reset(O);

    for (Slice &S : AS) {
      if (!S.isSplittable())
        continue;

      if ((S.beginOffset() > AllocaSize || SplittableOffset[S.beginOffset()]) &&
          (S.endOffset() > AllocaSize || SplittableOffset[S.endOffset()]))
        continue;

      if (isa<LoadInst>(S.getUse()->getUser()) ||
          isa<StoreInst>(S.getUse()->getUser())) {
        S.makeUnsplittable();
        IsSorted = false;
      }
    }
  } else {
    // We only allow whole-alloca splittable loads and stores
    // for a large alloca to avoid creating an excessively large BitVector.
    for (Slice &S : AS) {
      if (!S.isSplittable())
        continue;

      if (S.beginOffset() == 0 && S.endOffset() >= AllocaSize)
        continue;

      if (isa<LoadInst>(S.getUse()->getUser()) ||
          isa<StoreInst>(S.getUse()->getUser())) {
        S.makeUnsplittable();
        IsSorted = false;
      }
    }
  }

  if (!IsSorted)
    llvm::sort(AS);

  /// Describes the allocas introduced by rewritePartition in order to migrate
  /// the debug info.
  struct Fragment {
    AllocaInst *Alloca;
    uint64_t Offset;
    uint64_t Size;
    Fragment(AllocaInst *AI, uint64_t O, uint64_t S)
        : Alloca(AI), Offset(O), Size(S) {}
  };
  SmallVector<Fragment, 4> Fragments;

  // Rewrite each partition.
  for (auto &P : AS.partitions()) {
    if (AllocaInst *NewAI = rewritePartition(AI, AS, P)) {
      Changed = true;
      if (NewAI != &AI) {
        uint64_t SizeOfByte = 8;
        uint64_t AllocaSize =
            DL.getTypeSizeInBits(NewAI->getAllocatedType()).getFixedSize();
        // Don't include any padding.
        uint64_t Size = std::min(AllocaSize, P.size() * SizeOfByte);
        Fragments.push_back(
            Fragment(NewAI, P.beginOffset() * SizeOfByte, Size));
      }
    }
    ++NumPartitions;
  }

  NumAllocaPartitions += NumPartitions;
  MaxPartitionsPerAlloca.updateMax(NumPartitions);

  // Migrate debug information from the old alloca to the new alloca(s)
  // and the individual partitions.
  TinyPtrVector<DbgVariableIntrinsic *> DbgDeclares = FindDbgAddrUses(&AI);
  for (DbgVariableIntrinsic *DbgDeclare : DbgDeclares) {
    auto *Expr = DbgDeclare->getExpression();
    DIBuilder DIB(*AI.getModule(), /*AllowUnresolved*/ false);
    uint64_t AllocaSize =
        DL.getTypeSizeInBits(AI.getAllocatedType()).getFixedSize();
    for (auto Fragment : Fragments) {
      // Create a fragment expression describing the new partition or reuse
      // AI's expression if there is only one partition.
      auto *FragmentExpr = Expr;
      if (Fragment.Size < AllocaSize || Expr->isFragment()) {
        // If this alloca is already a scalar replacement of a larger
        // aggregate, Fragment.Offset describes the offset inside the scalar.
        auto ExprFragment = Expr->getFragmentInfo();
        uint64_t Offset = ExprFragment ? ExprFragment->OffsetInBits : 0;
        uint64_t Start = Offset + Fragment.Offset;
        uint64_t Size = Fragment.Size;
        if (ExprFragment) {
          uint64_t AbsEnd =
              ExprFragment->OffsetInBits + ExprFragment->SizeInBits;
          if (Start >= AbsEnd)
            // No need to describe a SROAed padding.
            continue;
          Size = std::min(Size, AbsEnd - Start);
        }
        // The new, smaller fragment is stenciled out from the old fragment.
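        // (If the old expression already described a fragment, Start is
        // rebased below so that it is relative to that original fragment
        // rather than to the whole variable.)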
        if (auto OrigFragment = FragmentExpr->getFragmentInfo()) {
          assert(Start >= OrigFragment->OffsetInBits &&
                 "new fragment is outside of original fragment");
          Start -= OrigFragment->OffsetInBits;
        }

        // The alloca may be larger than the variable.
        auto VarSize = DbgDeclare->getVariable()->getSizeInBits();
        if (VarSize) {
          if (Size > *VarSize)
            Size = *VarSize;
          if (Size == 0 || Start + Size > *VarSize)
            continue;
        }

        // Avoid creating a fragment expression that covers the entire
        // variable.
        if (!VarSize || *VarSize != Size) {
          if (auto E =
                  DIExpression::createFragmentExpression(Expr, Start, Size))
            FragmentExpr = *E;
          else
            continue;
        }
      }

      // Remove any existing intrinsics on the new alloca describing
      // the variable fragment.
      for (DbgVariableIntrinsic *OldDII : FindDbgAddrUses(Fragment.Alloca)) {
        auto SameVariableFragment = [](const DbgVariableIntrinsic *LHS,
                                       const DbgVariableIntrinsic *RHS) {
          return LHS->getVariable() == RHS->getVariable() &&
                 LHS->getDebugLoc()->getInlinedAt() ==
                     RHS->getDebugLoc()->getInlinedAt();
        };
        if (SameVariableFragment(OldDII, DbgDeclare))
          OldDII->eraseFromParent();
      }

      DIB.insertDeclare(Fragment.Alloca, DbgDeclare->getVariable(),
                        FragmentExpr, DbgDeclare->getDebugLoc(), &AI);
    }
  }
  return Changed;
}

/// Clobber a use with poison, deleting the used value if it becomes dead.
void SROAPass::clobberUse(Use &U) {
  Value *OldV = U;
  // Replace the use with a poison value.
  U = PoisonValue::get(OldV->getType());

  // Check for this making an instruction dead. We have to garbage collect
  // all the dead instructions to ensure the uses of any alloca end up being
  // minimal.
  if (Instruction *OldI = dyn_cast<Instruction>(OldV))
    if (isInstructionTriviallyDead(OldI)) {
      DeadInsts.push_back(OldI);
    }
}

/// Analyze an alloca for SROA.
///
/// This analyzes the alloca to ensure we can reason about it, builds
/// the slices of the alloca, and then hands it off to be split and
/// rewritten as needed.
bool SROAPass::runOnAlloca(AllocaInst &AI) {
  LLVM_DEBUG(dbgs() << "SROA alloca: " << AI << "\n");
  ++NumAllocasAnalyzed;

  // Special case dead allocas, as they're trivial.
  if (AI.use_empty()) {
    AI.eraseFromParent();
    return true;
  }
  const DataLayout &DL = AI.getModule()->getDataLayout();

  // Skip alloca forms that this analysis can't handle.
  auto *AT = AI.getAllocatedType();
  if (AI.isArrayAllocation() || !AT->isSized() || isa<ScalableVectorType>(AT) ||
      DL.getTypeAllocSize(AT).getFixedSize() == 0)
    return false;

  bool Changed = false;

  // First, split any FCA loads and stores touching this alloca to promote
  // better splitting and promotion opportunities.
  IRBuilderTy IRB(&AI);
  AggLoadStoreRewriter AggRewriter(DL, IRB);
  Changed |= AggRewriter.rewrite(AI);

  // Build the slices using a recursive instruction-visiting builder.
  AllocaSlices AS(DL, AI);
  LLVM_DEBUG(AS.print(dbgs()));
  if (AS.isEscaped())
    return Changed;

  // Delete all the dead users of this alloca before splitting and rewriting
  // it.
  for (Instruction *DeadUser : AS.getDeadUsers()) {
    // Free up everything used by this instruction.
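    // (clobberUse replaces the operand with poison and, if the operand's
    // defining instruction becomes trivially dead as a result, queues it in
    // DeadInsts for later deletion.)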
    for (Use &DeadOp : DeadUser->operands())
      clobberUse(DeadOp);

    // Now replace the uses of this instruction.
    DeadUser->replaceAllUsesWith(PoisonValue::get(DeadUser->getType()));

    // And mark it for deletion.
    DeadInsts.push_back(DeadUser);
    Changed = true;
  }
  for (Use *DeadOp : AS.getDeadOperands()) {
    clobberUse(*DeadOp);
    Changed = true;
  }

  // No slices to split. Leave the dead alloca for a later pass to clean up.
  if (AS.begin() == AS.end())
    return Changed;

  Changed |= splitAlloca(AI, AS);

  LLVM_DEBUG(dbgs() << "  Speculating PHIs\n");
  while (!SpeculatablePHIs.empty())
    speculatePHINodeLoads(IRB, *SpeculatablePHIs.pop_back_val());

  LLVM_DEBUG(dbgs() << "  Speculating Selects\n");
  while (!SpeculatableSelects.empty())
    speculateSelectInstLoads(IRB, *SpeculatableSelects.pop_back_val());

  return Changed;
}

/// Delete the dead instructions accumulated in this run.
///
/// Recursively deletes the dead instructions we've accumulated. This is done
/// at the very end to maximize locality of the recursive delete and to
/// minimize the problems of invalidated instruction pointers as such pointers
/// are used heavily in the intermediate stages of the algorithm.
///
/// We also record the alloca instructions deleted here so that they aren't
/// subsequently handed to mem2reg to promote.
bool SROAPass::deleteDeadInstructions(
    SmallPtrSetImpl<AllocaInst *> &DeletedAllocas) {
  bool Changed = false;
  while (!DeadInsts.empty()) {
    Instruction *I = dyn_cast_or_null<Instruction>(DeadInsts.pop_back_val());
    if (!I)
      continue;
    LLVM_DEBUG(dbgs() << "Deleting dead instruction: " << *I << "\n");

    // If the instruction is an alloca, find the possible dbg.declare connected
    // to it, and remove it too. We must do this before calling RAUW or we will
    // not be able to find it.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
      DeletedAllocas.insert(AI);
      for (DbgVariableIntrinsic *OldDII : FindDbgAddrUses(AI))
        OldDII->eraseFromParent();
    }

    I->replaceAllUsesWith(UndefValue::get(I->getType()));

    for (Use &Operand : I->operands())
      if (Instruction *U = dyn_cast<Instruction>(Operand)) {
        // Zero out the operand and see if it becomes trivially dead.
        Operand = nullptr;
        if (isInstructionTriviallyDead(U))
          DeadInsts.push_back(U);
      }

    ++NumDeleted;
    I->eraseFromParent();
    Changed = true;
  }
  return Changed;
}

/// Promote the allocas, using the best available technique.
///
/// This attempts to promote whatever allocas have been identified as viable in
/// the PromotableAllocas list. If that list is empty, there is nothing to do.
/// This function returns whether any promotion occurred.
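/// Currently the only technique used is full promotion to SSA values via
/// \c PromoteMemToReg (mem2reg).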
bool SROAPass::promoteAllocas(Function &F) {
  if (PromotableAllocas.empty())
    return false;

  NumPromoted += PromotableAllocas.size();

  LLVM_DEBUG(dbgs() << "Promoting allocas with mem2reg...\n");
  PromoteMemToReg(PromotableAllocas, *DT, AC);
  PromotableAllocas.clear();
  return true;
}

PreservedAnalyses SROAPass::runImpl(Function &F, DominatorTree &RunDT,
                                    AssumptionCache &RunAC) {
  LLVM_DEBUG(dbgs() << "SROA function: " << F.getName() << "\n");
  C = &F.getContext();
  DT = &RunDT;
  AC = &RunAC;

  BasicBlock &EntryBB = F.getEntryBlock();
  for (BasicBlock::iterator I = EntryBB.begin(), E = std::prev(EntryBB.end());
       I != E; ++I) {
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
      if (isa<ScalableVectorType>(AI->getAllocatedType())) {
        if (isAllocaPromotable(AI))
          PromotableAllocas.push_back(AI);
      } else {
        Worklist.insert(AI);
      }
    }
  }

  bool Changed = false;
  // A set of deleted alloca instruction pointers which should be removed from
  // the list of promotable allocas.
  SmallPtrSet<AllocaInst *, 4> DeletedAllocas;

  do {
    while (!Worklist.empty()) {
      Changed |= runOnAlloca(*Worklist.pop_back_val());
      Changed |= deleteDeadInstructions(DeletedAllocas);

      // Remove the deleted allocas from various lists so that we don't try to
      // continue processing them.
      if (!DeletedAllocas.empty()) {
        auto IsInSet = [&](AllocaInst *AI) { return DeletedAllocas.count(AI); };
        Worklist.remove_if(IsInSet);
        PostPromotionWorklist.remove_if(IsInSet);
        llvm::erase_if(PromotableAllocas, IsInSet);
        DeletedAllocas.clear();
      }
    }

    Changed |= promoteAllocas(F);

    Worklist = PostPromotionWorklist;
    PostPromotionWorklist.clear();
  } while (!Worklist.empty());

  if (!Changed)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  return PA;
}

PreservedAnalyses SROAPass::run(Function &F, FunctionAnalysisManager &AM) {
  return runImpl(F, AM.getResult<DominatorTreeAnalysis>(F),
                 AM.getResult<AssumptionAnalysis>(F));
}

/// A legacy pass for the legacy pass manager that wraps the \c SROA pass.
///
/// This is in the llvm namespace purely to allow it to be a friend of the \c
/// SROA pass.
class llvm::sroa::SROALegacyPass : public FunctionPass {
  /// The SROA implementation.
  SROAPass Impl;

public:
  static char ID;

  SROALegacyPass() : FunctionPass(ID) {
    initializeSROALegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto PA = Impl.runImpl(
        F, getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
        getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
    return !PA.areAllPreserved();
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.setPreservesCFG();
  }

  StringRef getPassName() const override { return "SROA"; }
};

char SROALegacyPass::ID = 0;

FunctionPass *llvm::createSROAPass() { return new SROALegacyPass(); }

INITIALIZE_PASS_BEGIN(SROALegacyPass, "sroa",
                      "Scalar Replacement Of Aggregates", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(SROALegacyPass, "sroa", "Scalar Replacement Of Aggregates",
                    false, false)
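// Both pass-manager entry points funnel into SROAPass::runImpl: the new pass
// manager through SROAPass::run above, and the legacy pass manager through the
// SROALegacyPass wrapper registered here. With a modern `opt`, the pass is
// typically invoked as `-passes=sroa`.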