//===- Scalarizer.cpp - Scalarize vector operations -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass converts vector operations into scalar operations, in order
// to expose optimization opportunities on the individual scalar operations.
// It is mainly intended for targets that do not have vector units, but it
// may also be useful for revectorizing code to different vector widths.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/Scalarizer.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Transforms/Scalar.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <map>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "scalarizer"

// This is disabled by default because having separate loads and stores
// makes it more likely that the -combiner-alias-analysis limits will be
// reached.
static cl::opt<bool>
    ScalarizeLoadStore("scalarize-load-store", cl::init(false), cl::Hidden,
                       cl::desc("Allow the scalarizer pass to scalarize "
                                "loads and stores"));

namespace {

// Used to store the scattered form of a vector.
using ValueVector = SmallVector<Value *, 8>;

// Used to map a vector Value to its scattered form. We use std::map
// because we want iterators to persist across insertion and because the
// values are relatively large.
using ScatterMap = std::map<Value *, ValueVector>;

// Lists Instructions that have been replaced with scalar implementations,
// along with a pointer to their scattered forms.
using GatherList = SmallVector<std::pair<Instruction *, ValueVector *>, 16>;

// Provides a very limited vector-like interface for lazily accessing one
// component of a scattered vector or vector pointer.
class Scatterer {
public:
  Scatterer() = default;

  // Scatter V into Size components. If new instructions are needed,
  // insert them before BBI in BB. If CachePtr is nonnull, use it to cache
  // the results.
  Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v,
            ValueVector *cachePtr = nullptr);

  // Return component I, creating a new Value for it if necessary.
  Value *operator[](unsigned I);

  // Return the number of components.
  unsigned size() const { return Size; }

private:
  BasicBlock *BB;
  BasicBlock::iterator BBI;
  Value *V;
  ValueVector *CachePtr;
  PointerType *PtrTy;
  ValueVector Tmp;
  unsigned Size;
};
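
// For example (illustrative IR; component names follow this pass's
// ".i" + index convention), scattering
//   %f = fadd <2 x float> %a, %b
// gives components %f.i0 and %f.i1, each created on demand either by
// reusing the operand of a matching insertelement chain or by emitting a
// new extractelement instruction.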

// FCmpSplitter(FCI)(Builder, X, Y, Name) uses Builder to create an FCmp
// called Name that compares X and Y in the same way as FCI.
struct FCmpSplitter {
  FCmpSplitter(FCmpInst &fci) : FCI(fci) {}

  Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1,
                    const Twine &Name) const {
    return Builder.CreateFCmp(FCI.getPredicate(), Op0, Op1, Name);
  }

  FCmpInst &FCI;
};

// ICmpSplitter(ICI)(Builder, X, Y, Name) uses Builder to create an ICmp
// called Name that compares X and Y in the same way as ICI.
struct ICmpSplitter {
  ICmpSplitter(ICmpInst &ici) : ICI(ici) {}

  Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1,
                    const Twine &Name) const {
    return Builder.CreateICmp(ICI.getPredicate(), Op0, Op1, Name);
  }

  ICmpInst &ICI;
};

// UnarySplitter(UO)(Builder, X, Name) uses Builder to create
// a unary operator like UO called Name with operand X.
struct UnarySplitter {
  UnarySplitter(UnaryOperator &uo) : UO(uo) {}

  Value *operator()(IRBuilder<> &Builder, Value *Op, const Twine &Name) const {
    return Builder.CreateUnOp(UO.getOpcode(), Op, Name);
  }

  UnaryOperator &UO;
};

// BinarySplitter(BO)(Builder, X, Y, Name) uses Builder to create
// a binary operator like BO called Name with operands X and Y.
struct BinarySplitter {
  BinarySplitter(BinaryOperator &bo) : BO(bo) {}

  Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1,
                    const Twine &Name) const {
    return Builder.CreateBinOp(BO.getOpcode(), Op0, Op1, Name);
  }

  BinaryOperator &BO;
};

// Information about a load or store that we're scalarizing.
struct VectorLayout {
  VectorLayout() = default;

  // Return the alignment of element I.
  uint64_t getElemAlign(unsigned I) {
    return MinAlign(VecAlign, I * ElemSize);
  }

  // The type of the vector.
  VectorType *VecTy = nullptr;

  // The type of each element.
  Type *ElemTy = nullptr;

  // The alignment of the vector.
  uint64_t VecAlign = 0;

  // The size of each element.
  uint64_t ElemSize = 0;
};
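
// For example, given a <4 x float> vector with VecAlign == 16 and
// ElemSize == 4, getElemAlign returns 16, 4, 8 and 4 for elements 0 to 3.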

class ScalarizerVisitor : public InstVisitor<ScalarizerVisitor, bool> {
public:
  ScalarizerVisitor(unsigned ParallelLoopAccessMDKind, DominatorTree *DT)
      : ParallelLoopAccessMDKind(ParallelLoopAccessMDKind), DT(DT) {
  }

  bool visit(Function &F);

  // InstVisitor methods. They return true if the instruction was scalarized,
  // false if nothing changed.
  bool visitInstruction(Instruction &I) { return false; }
  bool visitSelectInst(SelectInst &SI);
  bool visitICmpInst(ICmpInst &ICI);
  bool visitFCmpInst(FCmpInst &FCI);
  bool visitUnaryOperator(UnaryOperator &UO);
  bool visitBinaryOperator(BinaryOperator &BO);
  bool visitGetElementPtrInst(GetElementPtrInst &GEPI);
  bool visitCastInst(CastInst &CI);
  bool visitBitCastInst(BitCastInst &BCI);
  bool visitShuffleVectorInst(ShuffleVectorInst &SVI);
  bool visitPHINode(PHINode &PHI);
  bool visitLoadInst(LoadInst &LI);
  bool visitStoreInst(StoreInst &SI);
  bool visitCallInst(CallInst &CI);

private:
  Scatterer scatter(Instruction *Point, Value *V);
  void gather(Instruction *Op, const ValueVector &CV);
  bool canTransferMetadata(unsigned Kind);
  void transferMetadataAndIRFlags(Instruction *Op, const ValueVector &CV);
  bool getVectorLayout(Type *Ty, unsigned Alignment, VectorLayout &Layout,
                       const DataLayout &DL);
  bool finish();

  template<typename T> bool splitUnary(Instruction &, const T &);
  template<typename T> bool splitBinary(Instruction &, const T &);

  bool splitCall(CallInst &CI);

  ScatterMap Scattered;
  GatherList Gathered;

  unsigned ParallelLoopAccessMDKind;

  DominatorTree *DT;
};

class ScalarizerLegacyPass : public FunctionPass {
public:
  static char ID;

  ScalarizerLegacyPass() : FunctionPass(ID) {
    initializeScalarizerLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
  }
};

} // end anonymous namespace

char ScalarizerLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(ScalarizerLegacyPass, "scalarizer",
                      "Scalarize vector operations", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(ScalarizerLegacyPass, "scalarizer",
                    "Scalarize vector operations", false, false)

Scatterer::Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v,
                     ValueVector *cachePtr)
    : BB(bb), BBI(bbi), V(v), CachePtr(cachePtr) {
  Type *Ty = V->getType();
  PtrTy = dyn_cast<PointerType>(Ty);
  if (PtrTy)
    Ty = PtrTy->getElementType();
  Size = cast<VectorType>(Ty)->getNumElements();
  if (!CachePtr)
    Tmp.resize(Size, nullptr);
  else if (CachePtr->empty())
    CachePtr->resize(Size, nullptr);
  else
    assert(Size == CachePtr->size() && "Inconsistent vector sizes");
}

// Return component I, creating a new Value for it if necessary.
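// Illustrative walk through the insertelement search below: asking for
// component 1 of
//   %v1 = insertelement <2 x i32> undef, i32 %x, i32 0
//   %v2 = insertelement <2 x i32> %v1, i32 %y, i32 1
// returns %y directly, and a later request for component 0 walks up the
// chain to return %x, without creating any new instructions.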
Value *Scatterer::operator[](unsigned I) {
  ValueVector &CV = (CachePtr ? *CachePtr : Tmp);
  // Try to reuse a previous value.
  if (CV[I])
    return CV[I];
  IRBuilder<> Builder(BB, BBI);
  if (PtrTy) {
    Type *ElTy = cast<VectorType>(PtrTy->getElementType())->getElementType();
    if (!CV[0]) {
      Type *NewPtrTy = PointerType::get(ElTy, PtrTy->getAddressSpace());
      CV[0] = Builder.CreateBitCast(V, NewPtrTy, V->getName() + ".i0");
    }
    if (I != 0)
      CV[I] = Builder.CreateConstGEP1_32(ElTy, CV[0], I,
                                         V->getName() + ".i" + Twine(I));
  } else {
    // Search through a chain of InsertElementInsts looking for element I.
    // Record other elements in the cache. The new V is still suitable
    // for all uncached indices.
    while (true) {
      InsertElementInst *Insert = dyn_cast<InsertElementInst>(V);
      if (!Insert)
        break;
      ConstantInt *Idx = dyn_cast<ConstantInt>(Insert->getOperand(2));
      if (!Idx)
        break;
      unsigned J = Idx->getZExtValue();
      V = Insert->getOperand(0);
      if (I == J) {
        CV[J] = Insert->getOperand(1);
        return CV[J];
      } else if (!CV[J]) {
        // Only cache the first entry we find for each index we're not
        // actively searching for. This prevents us from going too far up
        // the chain and caching incorrect entries.
        CV[J] = Insert->getOperand(1);
      }
    }
    CV[I] = Builder.CreateExtractElement(V, Builder.getInt32(I),
                                         V->getName() + ".i" + Twine(I));
  }
  return CV[I];
}

bool ScalarizerLegacyPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  Module &M = *F.getParent();
  unsigned ParallelLoopAccessMDKind =
      M.getContext().getMDKindID("llvm.mem.parallel_loop_access");
  DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  ScalarizerVisitor Impl(ParallelLoopAccessMDKind, DT);
  return Impl.visit(F);
}

FunctionPass *llvm::createScalarizerPass() {
  return new ScalarizerLegacyPass();
}

bool ScalarizerVisitor::visit(Function &F) {
  assert(Gathered.empty() && Scattered.empty());

  // To ensure we replace gathered components correctly, we need to do an
  // ordered traversal of the basic blocks in the function.
  ReversePostOrderTraversal<BasicBlock *> RPOT(&F.getEntryBlock());
  for (BasicBlock *BB : RPOT) {
    for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
      Instruction *I = &*II;
      bool Done = InstVisitor::visit(I);
      ++II;
      if (Done && I->getType()->isVoidTy())
        I->eraseFromParent();
    }
  }
  return finish();
}

// Return a scattered form of V that can be accessed by Point. V must be a
// vector or a pointer to a vector.
Scatterer ScalarizerVisitor::scatter(Instruction *Point, Value *V) {
  if (Argument *VArg = dyn_cast<Argument>(V)) {
    // Put the scattered form of arguments in the entry block,
    // so that it can be used everywhere.
    Function *F = VArg->getParent();
    BasicBlock *BB = &F->getEntryBlock();
    return Scatterer(BB, BB->begin(), V, &Scattered[V]);
  }
  if (Instruction *VOp = dyn_cast<Instruction>(V)) {
    // When scalarizing PHI nodes we might try to examine/rewrite
    // InsertElement nodes in predecessors. If those predecessors are
    // unreachable from entry, then the IR in those blocks could have
    // unexpected properties resulting in infinite loops in
    // Scatterer::operator[]. By simply treating values originating from
    // instructions in unreachable blocks as undef we do not need to analyse
    // them further.
    if (!DT->isReachableFromEntry(VOp->getParent()))
      return Scatterer(Point->getParent(), Point->getIterator(),
                       UndefValue::get(V->getType()));
    // Put the scattered form of an instruction directly after the
    // instruction.
    BasicBlock *BB = VOp->getParent();
    return Scatterer(BB, std::next(BasicBlock::iterator(VOp)),
                     V, &Scattered[V]);
  }
  // In the fallback case, just put the scattered form before Point and
  // keep the result local to Point.
  return Scatterer(Point->getParent(), Point->getIterator(), V);
}
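
// For example, scattering a <2 x i32> argument %arg creates %arg.i0 and
// %arg.i1 at the top of the entry block, and caches them in Scattered[%arg]
// so that every later scatter of %arg reuses the same extracts.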
// Replace Op with the gathered form of the components in CV. Defer the
// deletion of Op and creation of the gathered form to the end of the pass,
// so that we can avoid creating the gathered form if all uses of Op are
// replaced with uses of CV.
void ScalarizerVisitor::gather(Instruction *Op, const ValueVector &CV) {
  // Since we're not deleting Op yet, stub out its operands, so that it
  // doesn't make anything live unnecessarily.
  for (unsigned I = 0, E = Op->getNumOperands(); I != E; ++I)
    Op->setOperand(I, UndefValue::get(Op->getOperand(I)->getType()));

  transferMetadataAndIRFlags(Op, CV);

  // If we already have a scattered form of Op (created from ExtractElements
  // of Op itself), replace them with the new form.
  ValueVector &SV = Scattered[Op];
  if (!SV.empty()) {
    for (unsigned I = 0, E = SV.size(); I != E; ++I) {
      Value *V = SV[I];
      if (V == nullptr)
        continue;

      Instruction *Old = cast<Instruction>(V);
      CV[I]->takeName(Old);
      Old->replaceAllUsesWith(CV[I]);
      Old->eraseFromParent();
    }
  }
  SV = CV;
  Gathered.push_back(GatherList::value_type(Op, &SV));
}

// Return true if it is safe to transfer the given metadata tag from
// vector to scalar instructions.
bool ScalarizerVisitor::canTransferMetadata(unsigned Tag) {
  return (Tag == LLVMContext::MD_tbaa
          || Tag == LLVMContext::MD_fpmath
          || Tag == LLVMContext::MD_tbaa_struct
          || Tag == LLVMContext::MD_invariant_load
          || Tag == LLVMContext::MD_alias_scope
          || Tag == LLVMContext::MD_noalias
          || Tag == ParallelLoopAccessMDKind
          || Tag == LLVMContext::MD_access_group);
}

// Transfer metadata from Op to the instructions in CV if it is known
// to be safe to do so.
void ScalarizerVisitor::transferMetadataAndIRFlags(Instruction *Op,
                                                   const ValueVector &CV) {
  SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
  Op->getAllMetadataOtherThanDebugLoc(MDs);
  for (unsigned I = 0, E = CV.size(); I != E; ++I) {
    if (Instruction *New = dyn_cast<Instruction>(CV[I])) {
      for (const auto &MD : MDs)
        if (canTransferMetadata(MD.first))
          New->setMetadata(MD.first, MD.second);
      New->copyIRFlags(Op);
      if (Op->getDebugLoc() && !New->getDebugLoc())
        New->setDebugLoc(Op->getDebugLoc());
    }
  }
}

// Try to fill in Layout from Ty, returning true on success. Alignment is
// the alignment of the vector, or 0 if the ABI default should be used.
bool ScalarizerVisitor::getVectorLayout(Type *Ty, unsigned Alignment,
                                        VectorLayout &Layout,
                                        const DataLayout &DL) {
  // Make sure we're dealing with a vector.
  Layout.VecTy = dyn_cast<VectorType>(Ty);
  if (!Layout.VecTy)
    return false;

  // Check that we're dealing with full-byte elements.
  Layout.ElemTy = Layout.VecTy->getElementType();
  if (!DL.typeSizeEqualsStoreSize(Layout.ElemTy))
    return false;

  if (Alignment)
    Layout.VecAlign = Alignment;
  else
    Layout.VecAlign = DL.getABITypeAlignment(Layout.VecTy);
  Layout.ElemSize = DL.getTypeStoreSize(Layout.ElemTy);
  return true;
}

// Scalarize one-operand instruction I, using Split(Builder, X, Name)
// to create an instruction like I with operand X and name Name.
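// For example, splitUnary rewrites
//   %r = fneg <2 x float> %v
// as the two scalar operations
//   %r.i0 = fneg float %v.i0
//   %r.i1 = fneg float %v.i1
// with %r itself recreated later by finish() only if it still has uses.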
template<typename Splitter>
bool ScalarizerVisitor::splitUnary(Instruction &I, const Splitter &Split) {
  VectorType *VT = dyn_cast<VectorType>(I.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&I);
  Scatterer Op = scatter(&I, I.getOperand(0));
  assert(Op.size() == NumElems && "Mismatched unary operation");
  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned Elem = 0; Elem < NumElems; ++Elem)
    Res[Elem] = Split(Builder, Op[Elem], I.getName() + ".i" + Twine(Elem));
  gather(&I, Res);
  return true;
}

// Scalarize two-operand instruction I, using Split(Builder, X, Y, Name)
// to create an instruction like I with operands X and Y and name Name.
template<typename Splitter>
bool ScalarizerVisitor::splitBinary(Instruction &I, const Splitter &Split) {
  VectorType *VT = dyn_cast<VectorType>(I.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&I);
  Scatterer VOp0 = scatter(&I, I.getOperand(0));
  Scatterer VOp1 = scatter(&I, I.getOperand(1));
  assert(VOp0.size() == NumElems && "Mismatched binary operation");
  assert(VOp1.size() == NumElems && "Mismatched binary operation");
  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned Elem = 0; Elem < NumElems; ++Elem) {
    Value *Op0 = VOp0[Elem];
    Value *Op1 = VOp1[Elem];
    Res[Elem] = Split(Builder, Op0, Op1, I.getName() + ".i" + Twine(Elem));
  }
  gather(&I, Res);
  return true;
}

static bool isTriviallyScalarizable(Intrinsic::ID ID) {
  return isTriviallyVectorizable(ID);
}

// All of the current scalarizable intrinsics only have one mangled type.
static Function *getScalarIntrinsicDeclaration(Module *M,
                                               Intrinsic::ID ID,
                                               VectorType *Ty) {
  return Intrinsic::getDeclaration(M, ID, { Ty->getScalarType() });
}

/// If CI is a call to a vector-typed intrinsic function, split it into one
/// scalar call per element, if the intrinsic allows it.
bool ScalarizerVisitor::splitCall(CallInst &CI) {
  VectorType *VT = dyn_cast<VectorType>(CI.getType());
  if (!VT)
    return false;

  Function *F = CI.getCalledFunction();
  if (!F)
    return false;

  Intrinsic::ID ID = F->getIntrinsicID();
  if (ID == Intrinsic::not_intrinsic || !isTriviallyScalarizable(ID))
    return false;

  unsigned NumElems = VT->getNumElements();
  unsigned NumArgs = CI.getNumArgOperands();

  ValueVector ScalarOperands(NumArgs);
  SmallVector<Scatterer, 8> Scattered(NumArgs);

  // Assumes that any vector operand has the same number of elements as the
  // return vector type, which is true for all current intrinsics.
  for (unsigned I = 0; I != NumArgs; ++I) {
    Value *OpI = CI.getOperand(I);
    if (OpI->getType()->isVectorTy()) {
      Scattered[I] = scatter(&CI, OpI);
      assert(Scattered[I].size() == NumElems && "mismatched call operands");
    } else {
      ScalarOperands[I] = OpI;
    }
  }

  ValueVector Res(NumElems);
  ValueVector ScalarCallOps(NumArgs);

  Function *NewIntrin = getScalarIntrinsicDeclaration(F->getParent(), ID, VT);
  IRBuilder<> Builder(&CI);

  // Perform actual scalarization, taking care to preserve any scalar operands.
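  // (A scalar operand, such as the i32 exponent of llvm.powi, is passed
  // through unchanged to every scalar call; hasVectorInstrinsicScalarOpd
  // identifies such operands.)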
  for (unsigned Elem = 0; Elem < NumElems; ++Elem) {
    ScalarCallOps.clear();

    for (unsigned J = 0; J != NumArgs; ++J) {
      if (hasVectorInstrinsicScalarOpd(ID, J))
        ScalarCallOps.push_back(ScalarOperands[J]);
      else
        ScalarCallOps.push_back(Scattered[J][Elem]);
    }

    Res[Elem] = Builder.CreateCall(NewIntrin, ScalarCallOps,
                                   CI.getName() + ".i" + Twine(Elem));
  }

  gather(&CI, Res);
  return true;
}

bool ScalarizerVisitor::visitSelectInst(SelectInst &SI) {
  VectorType *VT = dyn_cast<VectorType>(SI.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&SI);
  Scatterer VOp1 = scatter(&SI, SI.getOperand(1));
  Scatterer VOp2 = scatter(&SI, SI.getOperand(2));
  assert(VOp1.size() == NumElems && "Mismatched select");
  assert(VOp2.size() == NumElems && "Mismatched select");
  ValueVector Res;
  Res.resize(NumElems);

  if (SI.getOperand(0)->getType()->isVectorTy()) {
    Scatterer VOp0 = scatter(&SI, SI.getOperand(0));
    assert(VOp0.size() == NumElems && "Mismatched select");
    for (unsigned I = 0; I < NumElems; ++I) {
      Value *Op0 = VOp0[I];
      Value *Op1 = VOp1[I];
      Value *Op2 = VOp2[I];
      Res[I] = Builder.CreateSelect(Op0, Op1, Op2,
                                    SI.getName() + ".i" + Twine(I));
    }
  } else {
    Value *Op0 = SI.getOperand(0);
    for (unsigned I = 0; I < NumElems; ++I) {
      Value *Op1 = VOp1[I];
      Value *Op2 = VOp2[I];
      Res[I] = Builder.CreateSelect(Op0, Op1, Op2,
                                    SI.getName() + ".i" + Twine(I));
    }
  }
  gather(&SI, Res);
  return true;
}

bool ScalarizerVisitor::visitICmpInst(ICmpInst &ICI) {
  return splitBinary(ICI, ICmpSplitter(ICI));
}

bool ScalarizerVisitor::visitFCmpInst(FCmpInst &FCI) {
  return splitBinary(FCI, FCmpSplitter(FCI));
}

bool ScalarizerVisitor::visitUnaryOperator(UnaryOperator &UO) {
  return splitUnary(UO, UnarySplitter(UO));
}

bool ScalarizerVisitor::visitBinaryOperator(BinaryOperator &BO) {
  return splitBinary(BO, BinarySplitter(BO));
}
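
// Scalarize a vector GEP lane by lane. As an illustration (using the typed
// pointers this file assumes),
//   %p = getelementptr i32, <2 x i32*> %base, <2 x i64> %idx
// becomes
//   %p.i0 = getelementptr i32, i32* %base.i0, i64 %idx.i0
//   %p.i1 = getelementptr i32, i32* %base.i1, i64 %idx.i1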
bool ScalarizerVisitor::visitGetElementPtrInst(GetElementPtrInst &GEPI) {
  VectorType *VT = dyn_cast<VectorType>(GEPI.getType());
  if (!VT)
    return false;

  IRBuilder<> Builder(&GEPI);
  unsigned NumElems = VT->getNumElements();
  unsigned NumIndices = GEPI.getNumIndices();

  // The base pointer might be scalar even if it's a vector GEP. In those
  // cases, splat the pointer into a vector value, and scatter that vector.
  Value *Op0 = GEPI.getOperand(0);
  if (!Op0->getType()->isVectorTy())
    Op0 = Builder.CreateVectorSplat(NumElems, Op0);
  Scatterer Base = scatter(&GEPI, Op0);

  SmallVector<Scatterer, 8> Ops;
  Ops.resize(NumIndices);
  for (unsigned I = 0; I < NumIndices; ++I) {
    Value *Op = GEPI.getOperand(I + 1);

    // The indices might be scalars even if it's a vector GEP. In those
    // cases, splat the scalar into a vector value, and scatter that vector.
    if (!Op->getType()->isVectorTy())
      Op = Builder.CreateVectorSplat(NumElems, Op);

    Ops[I] = scatter(&GEPI, Op);
  }

  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned I = 0; I < NumElems; ++I) {
    SmallVector<Value *, 8> Indices;
    Indices.resize(NumIndices);
    for (unsigned J = 0; J < NumIndices; ++J)
      Indices[J] = Ops[J][I];
    Res[I] = Builder.CreateGEP(GEPI.getSourceElementType(), Base[I], Indices,
                               GEPI.getName() + ".i" + Twine(I));
    if (GEPI.isInBounds())
      if (GetElementPtrInst *NewGEPI = dyn_cast<GetElementPtrInst>(Res[I]))
        NewGEPI->setIsInBounds();
  }
  gather(&GEPI, Res);
  return true;
}

bool ScalarizerVisitor::visitCastInst(CastInst &CI) {
  VectorType *VT = dyn_cast<VectorType>(CI.getDestTy());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&CI);
  Scatterer Op0 = scatter(&CI, CI.getOperand(0));
  assert(Op0.size() == NumElems && "Mismatched cast");
  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned I = 0; I < NumElems; ++I)
    Res[I] = Builder.CreateCast(CI.getOpcode(), Op0[I], VT->getElementType(),
                                CI.getName() + ".i" + Twine(I));
  gather(&CI, Res);
  return true;
}

bool ScalarizerVisitor::visitBitCastInst(BitCastInst &BCI) {
  VectorType *DstVT = dyn_cast<VectorType>(BCI.getDestTy());
  VectorType *SrcVT = dyn_cast<VectorType>(BCI.getSrcTy());
  if (!DstVT || !SrcVT)
    return false;

  unsigned DstNumElems = DstVT->getNumElements();
  unsigned SrcNumElems = SrcVT->getNumElements();
  IRBuilder<> Builder(&BCI);
  Scatterer Op0 = scatter(&BCI, BCI.getOperand(0));
  ValueVector Res;
  Res.resize(DstNumElems);

  if (DstNumElems == SrcNumElems) {
    for (unsigned I = 0; I < DstNumElems; ++I)
      Res[I] = Builder.CreateBitCast(Op0[I], DstVT->getElementType(),
                                     BCI.getName() + ".i" + Twine(I));
  } else if (DstNumElems > SrcNumElems) {
    // <M x t1> -> <N*M x t2>. Convert each t1 to <N x t2> and copy the
    // individual elements to the destination.
    unsigned FanOut = DstNumElems / SrcNumElems;
    Type *MidTy = VectorType::get(DstVT->getElementType(), FanOut);
    unsigned ResI = 0;
    for (unsigned Op0I = 0; Op0I < SrcNumElems; ++Op0I) {
      Value *V = Op0[Op0I];
      Instruction *VI;
      // Look through any existing bitcasts before converting to <N x t2>.
      // In the best case, the resulting conversion might be a no-op.
      while ((VI = dyn_cast<Instruction>(V)) &&
             VI->getOpcode() == Instruction::BitCast)
        V = VI->getOperand(0);
      V = Builder.CreateBitCast(V, MidTy, V->getName() + ".cast");
      Scatterer Mid = scatter(&BCI, V);
      for (unsigned MidI = 0; MidI < FanOut; ++MidI)
        Res[ResI++] = Mid[MidI];
    }
  } else {
    // <N*M x t1> -> <M x t2>. Convert each group of <N x t1> into a t2.
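    // For example, <4 x i16> -> <2 x i32> builds elements 0-1 into a
    // <2 x i16> that is bitcast to result 0, and elements 2-3 into a
    // <2 x i16> that is bitcast to result 1.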
    unsigned FanIn = SrcNumElems / DstNumElems;
    Type *MidTy = VectorType::get(SrcVT->getElementType(), FanIn);
    unsigned Op0I = 0;
    for (unsigned ResI = 0; ResI < DstNumElems; ++ResI) {
      Value *V = UndefValue::get(MidTy);
      for (unsigned MidI = 0; MidI < FanIn; ++MidI)
        V = Builder.CreateInsertElement(V, Op0[Op0I++], Builder.getInt32(MidI),
                                        BCI.getName() + ".i" + Twine(ResI)
                                        + ".upto" + Twine(MidI));
      Res[ResI] = Builder.CreateBitCast(V, DstVT->getElementType(),
                                        BCI.getName() + ".i" + Twine(ResI));
    }
  }
  gather(&BCI, Res);
  return true;
}

bool ScalarizerVisitor::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
  VectorType *VT = dyn_cast<VectorType>(SVI.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  Scatterer Op0 = scatter(&SVI, SVI.getOperand(0));
  Scatterer Op1 = scatter(&SVI, SVI.getOperand(1));
  ValueVector Res;
  Res.resize(NumElems);

  for (unsigned I = 0; I < NumElems; ++I) {
    int Selector = SVI.getMaskValue(I);
    if (Selector < 0)
      Res[I] = UndefValue::get(VT->getElementType());
    else if (unsigned(Selector) < Op0.size())
      Res[I] = Op0[Selector];
    else
      Res[I] = Op1[Selector - Op0.size()];
  }
  gather(&SVI, Res);
  return true;
}

bool ScalarizerVisitor::visitPHINode(PHINode &PHI) {
  VectorType *VT = dyn_cast<VectorType>(PHI.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&PHI);
  ValueVector Res;
  Res.resize(NumElems);

  unsigned NumOps = PHI.getNumOperands();
  for (unsigned I = 0; I < NumElems; ++I)
    Res[I] = Builder.CreatePHI(VT->getElementType(), NumOps,
                               PHI.getName() + ".i" + Twine(I));

  for (unsigned I = 0; I < NumOps; ++I) {
    Scatterer Op = scatter(&PHI, PHI.getIncomingValue(I));
    BasicBlock *IncomingBlock = PHI.getIncomingBlock(I);
    for (unsigned J = 0; J < NumElems; ++J)
      cast<PHINode>(Res[J])->addIncoming(Op[J], IncomingBlock);
  }
  gather(&PHI, Res);
  return true;
}

bool ScalarizerVisitor::visitLoadInst(LoadInst &LI) {
  if (!ScalarizeLoadStore)
    return false;
  if (!LI.isSimple())
    return false;

  VectorLayout Layout;
  if (!getVectorLayout(LI.getType(), LI.getAlignment(), Layout,
                       LI.getModule()->getDataLayout()))
    return false;

  unsigned NumElems = Layout.VecTy->getNumElements();
  IRBuilder<> Builder(&LI);
  Scatterer Ptr = scatter(&LI, LI.getPointerOperand());
  ValueVector Res;
  Res.resize(NumElems);

  for (unsigned I = 0; I < NumElems; ++I)
    Res[I] = Builder.CreateAlignedLoad(Layout.VecTy->getElementType(), Ptr[I],
                                       Align(Layout.getElemAlign(I)),
                                       LI.getName() + ".i" + Twine(I));
  gather(&LI, Res);
  return true;
}

bool ScalarizerVisitor::visitStoreInst(StoreInst &SI) {
  if (!ScalarizeLoadStore)
    return false;
  if (!SI.isSimple())
    return false;

  VectorLayout Layout;
  Value *FullValue = SI.getValueOperand();
  if (!getVectorLayout(FullValue->getType(), SI.getAlignment(), Layout,
                       SI.getModule()->getDataLayout()))
    return false;

  unsigned NumElems = Layout.VecTy->getNumElements();
  IRBuilder<> Builder(&SI);
  Scatterer VPtr = scatter(&SI, SI.getPointerOperand());
  Scatterer VVal = scatter(&SI, FullValue);

  ValueVector Stores;
  Stores.resize(NumElems);
  for (unsigned I = 0; I < NumElems; ++I) {
    unsigned Align = Layout.getElemAlign(I);
    Value *Val = VVal[I];
    Value *Ptr = VPtr[I];
    Stores[I] = Builder.CreateAlignedStore(Val, Ptr, MaybeAlign(Align));
  }
  transferMetadataAndIRFlags(&SI, Stores);
  return true;
}

bool ScalarizerVisitor::visitCallInst(CallInst &CI) {
  return splitCall(CI);
}

// Delete the instructions that we scalarized. If a full vector result
// is still needed, recreate it using InsertElements.
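// For example, if a scalarized %f = fadd <2 x float> ... still has uses,
// finish() rebuilds it from the components as
//   %f.upto0 = insertelement <2 x float> undef, float %f.i0, i32 0
//   %f = insertelement <2 x float> %f.upto0, float %f.i1, i32 1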
bool ScalarizerVisitor::finish() {
  // The presence of data in Gathered or Scattered indicates changes
  // made to the Function.
  if (Gathered.empty() && Scattered.empty())
    return false;
  for (const auto &GMI : Gathered) {
    Instruction *Op = GMI.first;
    ValueVector &CV = *GMI.second;
    if (!Op->use_empty()) {
      // The value is still needed, so recreate it using a series of
      // InsertElements.
      auto *Ty = cast<VectorType>(Op->getType());
      Value *Res = UndefValue::get(Ty);
      BasicBlock *BB = Op->getParent();
      unsigned Count = Ty->getNumElements();
      IRBuilder<> Builder(Op);
      if (isa<PHINode>(Op))
        Builder.SetInsertPoint(BB, BB->getFirstInsertionPt());
      for (unsigned I = 0; I < Count; ++I)
        Res = Builder.CreateInsertElement(Res, CV[I], Builder.getInt32(I),
                                          Op->getName() + ".upto" + Twine(I));
      Res->takeName(Op);
      Op->replaceAllUsesWith(Res);
    }
    Op->eraseFromParent();
  }
  Gathered.clear();
  Scattered.clear();
  return true;
}

PreservedAnalyses ScalarizerPass::run(Function &F, FunctionAnalysisManager &AM) {
  Module &M = *F.getParent();
  unsigned ParallelLoopAccessMDKind =
      M.getContext().getMDKindID("llvm.mem.parallel_loop_access");
  DominatorTree *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  ScalarizerVisitor Impl(ParallelLoopAccessMDKind, DT);
  bool Changed = Impl.visit(F);
  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  return Changed ? PA : PreservedAnalyses::all();
}