//===--- Scalarizer.cpp - Scalarize vector operations ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass converts vector operations into scalar operations, in order
// to expose optimization opportunities on the individual scalar operations.
// It is mainly intended for targets that do not have vector units, but it
// may also be useful for revectorizing code to different vector widths.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/Pass.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

using namespace llvm;

#define DEBUG_TYPE "scalarizer"

namespace {
// Used to store the scattered form of a vector.
typedef SmallVector<Value *, 8> ValueVector;

// Used to map a vector Value to its scattered form. We use std::map
// because we want iterators to persist across insertion and because the
// values are relatively large.
typedef std::map<Value *, ValueVector> ScatterMap;

// Lists Instructions that have been replaced with scalar implementations,
// along with a pointer to their scattered forms.
typedef SmallVector<std::pair<Instruction *, ValueVector *>, 16> GatherList;

// Provides a very limited vector-like interface for lazily accessing one
// component of a scattered vector or vector pointer.
class Scatterer {
public:
  Scatterer() {}

  // Scatter V into Size components. If new instructions are needed,
  // insert them before BBI in BB. If CachePtr is nonnull, use it to cache
  // the results.
  Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v,
            ValueVector *cachePtr = nullptr);

  // Return component I, creating a new Value for it if necessary.
  Value *operator[](unsigned I);

  // Return the number of components.
  unsigned size() const { return Size; }

private:
  BasicBlock *BB;
  BasicBlock::iterator BBI;
  Value *V;
  ValueVector *CachePtr;
  PointerType *PtrTy;
  ValueVector Tmp;
  unsigned Size;
};

// FCmpSplitter(FCI)(Builder, X, Y, Name) uses Builder to create an FCmp
// called Name that compares X and Y in the same way as FCI.
struct FCmpSplitter {
  FCmpSplitter(FCmpInst &fci) : FCI(fci) {}
  Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1,
                    const Twine &Name) const {
    return Builder.CreateFCmp(FCI.getPredicate(), Op0, Op1, Name);
  }
  FCmpInst &FCI;
};

// ICmpSplitter(ICI)(Builder, X, Y, Name) uses Builder to create an ICmp
// called Name that compares X and Y in the same way as ICI.
struct ICmpSplitter {
  ICmpSplitter(ICmpInst &ici) : ICI(ici) {}
  Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1,
                    const Twine &Name) const {
    return Builder.CreateICmp(ICI.getPredicate(), Op0, Op1, Name);
  }
  ICmpInst &ICI;
};

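// Illustrative example (not from the original source): splitBinary below
// invokes one of these splitter functors once per element. Given
//   %cmp = icmp slt <2 x i32> %a, %b
// ICmpSplitter produces the scalar equivalents
//   %cmp.i0 = icmp slt i32 %a.i0, %b.i0
//   %cmp.i1 = icmp slt i32 %a.i1, %b.i1
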
// BinarySplitter(BO)(Builder, X, Y, Name) uses Builder to create
// a binary operator like BO called Name with operands X and Y.
struct BinarySplitter {
  BinarySplitter(BinaryOperator &bo) : BO(bo) {}
  Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1,
                    const Twine &Name) const {
    return Builder.CreateBinOp(BO.getOpcode(), Op0, Op1, Name);
  }
  BinaryOperator &BO;
};

// Information about a load or store that we're scalarizing.
struct VectorLayout {
  VectorLayout() : VecTy(nullptr), ElemTy(nullptr), VecAlign(0), ElemSize(0) {}

  // Return the alignment of element I.
  uint64_t getElemAlign(unsigned I) {
    return MinAlign(VecAlign, I * ElemSize);
  }

  // The type of the vector.
  VectorType *VecTy;

  // The type of each element.
  Type *ElemTy;

  // The alignment of the vector.
  uint64_t VecAlign;

  // The size of each element.
  uint64_t ElemSize;
};

class Scalarizer : public FunctionPass,
                   public InstVisitor<Scalarizer, bool> {
public:
  static char ID;

  Scalarizer() : FunctionPass(ID) {
    initializeScalarizerPass(*PassRegistry::getPassRegistry());
  }

  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;

  // InstVisitor methods. They return true if the instruction was scalarized,
  // false if nothing changed.
  bool visitInstruction(Instruction &) { return false; }
  bool visitSelectInst(SelectInst &SI);
  bool visitICmpInst(ICmpInst &);
  bool visitFCmpInst(FCmpInst &);
  bool visitBinaryOperator(BinaryOperator &);
  bool visitGetElementPtrInst(GetElementPtrInst &);
  bool visitCastInst(CastInst &);
  bool visitBitCastInst(BitCastInst &);
  bool visitShuffleVectorInst(ShuffleVectorInst &);
  bool visitPHINode(PHINode &);
  bool visitLoadInst(LoadInst &);
  bool visitStoreInst(StoreInst &);
  bool visitCallInst(CallInst &I);

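  // Illustrative usage note (an assumption, not part of the original
  // source): the option registered below would typically be enabled from
  // the command line, e.g. "opt -scalarizer -scalarize-load-store".
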
  static void registerOptions() {
    // This is disabled by default because having separate loads and stores
    // makes it more likely that the -combiner-alias-analysis limits will be
    // reached.
    OptionRegistry::registerOption<bool, Scalarizer,
                                   &Scalarizer::ScalarizeLoadStore>(
        "scalarize-load-store",
        "Allow the scalarizer pass to scalarize loads and stores", false);
  }

private:
  Scatterer scatter(Instruction *, Value *);
  void gather(Instruction *, const ValueVector &);
  bool canTransferMetadata(unsigned Kind);
  void transferMetadata(Instruction *, const ValueVector &);
  bool getVectorLayout(Type *, unsigned, VectorLayout &, const DataLayout &);
  bool finish();

  template<typename T> bool splitBinary(Instruction &, const T &);

  bool splitCall(CallInst &CI);

  ScatterMap Scattered;
  GatherList Gathered;
  unsigned ParallelLoopAccessMDKind;
  bool ScalarizeLoadStore;
};

char Scalarizer::ID = 0;
} // end anonymous namespace

INITIALIZE_PASS_WITH_OPTIONS(Scalarizer, "scalarizer",
                             "Scalarize vector operations", false, false)

Scatterer::Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v,
                     ValueVector *cachePtr)
    : BB(bb), BBI(bbi), V(v), CachePtr(cachePtr) {
  Type *Ty = V->getType();
  PtrTy = dyn_cast<PointerType>(Ty);
  if (PtrTy)
    Ty = PtrTy->getElementType();
  Size = Ty->getVectorNumElements();
  if (!CachePtr)
    Tmp.resize(Size, nullptr);
  else if (CachePtr->empty())
    CachePtr->resize(Size, nullptr);
  else
    assert(Size == CachePtr->size() && "Inconsistent vector sizes");
}

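// Illustrative example (not from the original source): scattering a vector
// pointer such as <4 x float>* %p lazily materializes
//   %p.i0 = bitcast <4 x float>* %p to float*
//   %p.i1 = getelementptr float, float* %p.i0, i32 1
// and so on, one component per access through operator[] below.
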
// Return component I, creating a new Value for it if necessary.
Value *Scatterer::operator[](unsigned I) {
  ValueVector &CV = (CachePtr ? *CachePtr : Tmp);
  // Try to reuse a previous value.
  if (CV[I])
    return CV[I];
  IRBuilder<> Builder(BB, BBI);
  if (PtrTy) {
    if (!CV[0]) {
      Type *Ty =
        PointerType::get(PtrTy->getElementType()->getVectorElementType(),
                         PtrTy->getAddressSpace());
      CV[0] = Builder.CreateBitCast(V, Ty, V->getName() + ".i0");
    }
    if (I != 0)
      CV[I] = Builder.CreateConstGEP1_32(nullptr, CV[0], I,
                                         V->getName() + ".i" + Twine(I));
  } else {
    // Search through a chain of InsertElementInsts looking for element I.
    // Record other elements in the cache. The new V is still suitable
    // for all uncached indices.
    for (;;) {
      InsertElementInst *Insert = dyn_cast<InsertElementInst>(V);
      if (!Insert)
        break;
      ConstantInt *Idx = dyn_cast<ConstantInt>(Insert->getOperand(2));
      if (!Idx)
        break;
      unsigned J = Idx->getZExtValue();
      V = Insert->getOperand(0);
      if (I == J) {
        CV[J] = Insert->getOperand(1);
        return CV[J];
      } else if (!CV[J]) {
        // Only cache the first entry we find for each index we're not actively
        // searching for. This prevents us from going too far up the chain and
        // caching incorrect entries.
        CV[J] = Insert->getOperand(1);
      }
    }
    CV[I] = Builder.CreateExtractElement(V, Builder.getInt32(I),
                                         V->getName() + ".i" + Twine(I));
  }
  return CV[I];
}

bool Scalarizer::doInitialization(Module &M) {
  ParallelLoopAccessMDKind =
    M.getContext().getMDKindID("llvm.mem.parallel_loop_access");
  ScalarizeLoadStore =
    M.getContext().getOption<bool, Scalarizer,
                             &Scalarizer::ScalarizeLoadStore>();
  return false;
}

bool Scalarizer::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;
  assert(Gathered.empty() && Scattered.empty());
  for (BasicBlock &BB : F) {
    for (BasicBlock::iterator II = BB.begin(), IE = BB.end(); II != IE;) {
      Instruction *I = &*II;
      bool Done = visit(I);
      ++II;
      if (Done && I->getType()->isVoidTy())
        I->eraseFromParent();
    }
  }
  return finish();
}

// Return a scattered form of V that can be accessed by Point. V must be a
// vector or a pointer to a vector.
Scatterer Scalarizer::scatter(Instruction *Point, Value *V) {
  if (Argument *VArg = dyn_cast<Argument>(V)) {
    // Put the scattered form of arguments in the entry block,
    // so that it can be used everywhere.
    Function *F = VArg->getParent();
    BasicBlock *BB = &F->getEntryBlock();
    return Scatterer(BB, BB->begin(), V, &Scattered[V]);
  }
  if (Instruction *VOp = dyn_cast<Instruction>(V)) {
    // Put the scattered form of an instruction directly after the
    // instruction.
    BasicBlock *BB = VOp->getParent();
    return Scatterer(BB, std::next(BasicBlock::iterator(VOp)),
                     V, &Scattered[V]);
  }
  // In the fallback case, just put the scattered form before Point and
  // keep the result local to Point.
  return Scatterer(Point->getParent(), Point->getIterator(), V);
}

// Replace Op with the gathered form of the components in CV. Defer the
// deletion of Op and creation of the gathered form to the end of the pass,
// so that we can avoid creating the gathered form if all uses of Op are
// replaced with uses of CV.
void Scalarizer::gather(Instruction *Op, const ValueVector &CV) {
  // Since we're not deleting Op yet, stub out its operands, so that it
  // doesn't make anything live unnecessarily.
  for (unsigned I = 0, E = Op->getNumOperands(); I != E; ++I)
    Op->setOperand(I, UndefValue::get(Op->getOperand(I)->getType()));

  transferMetadata(Op, CV);

  // If we already have a scattered form of Op (created from ExtractElements
  // of Op itself), replace them with the new form.
  ValueVector &SV = Scattered[Op];
  if (!SV.empty()) {
    for (unsigned I = 0, E = SV.size(); I != E; ++I) {
      Value *V = SV[I];
      if (V == nullptr)
        continue;

      Instruction *Old = cast<Instruction>(V);
      CV[I]->takeName(Old);
      Old->replaceAllUsesWith(CV[I]);
      Old->eraseFromParent();
    }
  }
  SV = CV;
  Gathered.push_back(GatherList::value_type(Op, &SV));
}

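// Illustrative example (not from the original source): after
//   %sum = add <2 x i32> %a, %b
// is scalarized, any extractelements of %sum created by earlier scatter()
// calls are rewritten to use %sum.i0 and %sum.i1, and the vector %sum is
// only rebuilt with insertelements in finish() if a full-vector use remains.
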
// Return true if it is safe to transfer the given metadata tag from
// vector to scalar instructions.
bool Scalarizer::canTransferMetadata(unsigned Tag) {
  return (Tag == LLVMContext::MD_tbaa
          || Tag == LLVMContext::MD_fpmath
          || Tag == LLVMContext::MD_tbaa_struct
          || Tag == LLVMContext::MD_invariant_load
          || Tag == LLVMContext::MD_alias_scope
          || Tag == LLVMContext::MD_noalias
          || Tag == ParallelLoopAccessMDKind);
}

// Transfer metadata from Op to the instructions in CV if it is known
// to be safe to do so.
void Scalarizer::transferMetadata(Instruction *Op, const ValueVector &CV) {
  SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
  Op->getAllMetadataOtherThanDebugLoc(MDs);
  for (unsigned I = 0, E = CV.size(); I != E; ++I) {
    if (Instruction *New = dyn_cast<Instruction>(CV[I])) {
      for (const auto &MD : MDs)
        if (canTransferMetadata(MD.first))
          New->setMetadata(MD.first, MD.second);
      if (Op->getDebugLoc() && !New->getDebugLoc())
        New->setDebugLoc(Op->getDebugLoc());
    }
  }
}

// Try to fill in Layout from Ty, returning true on success. Alignment is
// the alignment of the vector, or 0 if the ABI default should be used.
bool Scalarizer::getVectorLayout(Type *Ty, unsigned Alignment,
                                 VectorLayout &Layout, const DataLayout &DL) {
  // Make sure we're dealing with a vector.
  Layout.VecTy = dyn_cast<VectorType>(Ty);
  if (!Layout.VecTy)
    return false;

  // Check that we're dealing with full-byte elements.
  Layout.ElemTy = Layout.VecTy->getElementType();
  if (DL.getTypeSizeInBits(Layout.ElemTy) !=
      DL.getTypeStoreSizeInBits(Layout.ElemTy))
    return false;

  if (Alignment)
    Layout.VecAlign = Alignment;
  else
    Layout.VecAlign = DL.getABITypeAlignment(Layout.VecTy);
  Layout.ElemSize = DL.getTypeStoreSize(Layout.ElemTy);
  return true;
}

// Scalarize two-operand instruction I, using Split(Builder, X, Y, Name)
// to create an instruction like I with operands X and Y and name Name.
template<typename Splitter>
bool Scalarizer::splitBinary(Instruction &I, const Splitter &Split) {
  VectorType *VT = dyn_cast<VectorType>(I.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&I);
  Scatterer Op0 = scatter(&I, I.getOperand(0));
  Scatterer Op1 = scatter(&I, I.getOperand(1));
  assert(Op0.size() == NumElems && "Mismatched binary operation");
  assert(Op1.size() == NumElems && "Mismatched binary operation");
  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned Elem = 0; Elem < NumElems; ++Elem)
    Res[Elem] = Split(Builder, Op0[Elem], Op1[Elem],
                      I.getName() + ".i" + Twine(Elem));
  gather(&I, Res);
  return true;
}

static bool isTriviallyScalarizable(Intrinsic::ID ID) {
  return isTriviallyVectorizable(ID);
}

// All of the current scalarizable intrinsics only have one mangled type.
static Function *getScalarIntrinsicDeclaration(Module *M,
                                               Intrinsic::ID ID,
                                               VectorType *Ty) {
  return Intrinsic::getDeclaration(M, ID, { Ty->getScalarType() });
}

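// Illustrative example (not from the original source): a trivially
// scalarizable intrinsic call such as
//   %r = call <4 x float> @llvm.sqrt.v4f32(<4 x float> %x)
// is split by splitCall into four scalar calls
//   %r.i0 = call float @llvm.sqrt.f32(float %x.i0)
//   ...
//   %r.i3 = call float @llvm.sqrt.f32(float %x.i3)
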
/// If CI is a call to a vector-typed intrinsic function, split it into one
/// scalar call per element, where the intrinsic allows it.
bool Scalarizer::splitCall(CallInst &CI) {
  VectorType *VT = dyn_cast<VectorType>(CI.getType());
  if (!VT)
    return false;

  Function *F = CI.getCalledFunction();
  if (!F)
    return false;

  Intrinsic::ID ID = F->getIntrinsicID();
  if (ID == Intrinsic::not_intrinsic || !isTriviallyScalarizable(ID))
    return false;

  unsigned NumElems = VT->getNumElements();
  unsigned NumArgs = CI.getNumArgOperands();

  ValueVector ScalarOperands(NumArgs);
  SmallVector<Scatterer, 8> Scattered(NumArgs);

  // Assumes that any vector operand has the same number of elements as the
  // return vector type, which is true for all current intrinsics.
  for (unsigned I = 0; I != NumArgs; ++I) {
    Value *OpI = CI.getOperand(I);
    if (OpI->getType()->isVectorTy()) {
      Scattered[I] = scatter(&CI, OpI);
      assert(Scattered[I].size() == NumElems && "mismatched call operands");
    } else {
      ScalarOperands[I] = OpI;
    }
  }

  ValueVector Res(NumElems);
  ValueVector ScalarCallOps(NumArgs);

  Function *NewIntrin = getScalarIntrinsicDeclaration(F->getParent(), ID, VT);
  IRBuilder<> Builder(&CI);

  // Perform actual scalarization, taking care to preserve any scalar operands.
  for (unsigned Elem = 0; Elem < NumElems; ++Elem) {
    ScalarCallOps.clear();

    for (unsigned J = 0; J != NumArgs; ++J) {
      if (hasVectorInstrinsicScalarOpd(ID, J))
        ScalarCallOps.push_back(ScalarOperands[J]);
      else
        ScalarCallOps.push_back(Scattered[J][Elem]);
    }

    Res[Elem] = Builder.CreateCall(NewIntrin, ScalarCallOps,
                                   CI.getName() + ".i" + Twine(Elem));
  }

  gather(&CI, Res);
  return true;
}

bool Scalarizer::visitSelectInst(SelectInst &SI) {
  VectorType *VT = dyn_cast<VectorType>(SI.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&SI);
  Scatterer Op1 = scatter(&SI, SI.getOperand(1));
  Scatterer Op2 = scatter(&SI, SI.getOperand(2));
  assert(Op1.size() == NumElems && "Mismatched select");
  assert(Op2.size() == NumElems && "Mismatched select");
  ValueVector Res;
  Res.resize(NumElems);

  if (SI.getOperand(0)->getType()->isVectorTy()) {
    Scatterer Op0 = scatter(&SI, SI.getOperand(0));
    assert(Op0.size() == NumElems && "Mismatched select");
    for (unsigned I = 0; I < NumElems; ++I)
      Res[I] = Builder.CreateSelect(Op0[I], Op1[I], Op2[I],
                                    SI.getName() + ".i" + Twine(I));
  } else {
    Value *Op0 = SI.getOperand(0);
    for (unsigned I = 0; I < NumElems; ++I)
      Res[I] = Builder.CreateSelect(Op0, Op1[I], Op2[I],
                                    SI.getName() + ".i" + Twine(I));
  }
  gather(&SI, Res);
  return true;
}

bool Scalarizer::visitICmpInst(ICmpInst &ICI) {
  return splitBinary(ICI, ICmpSplitter(ICI));
}

bool Scalarizer::visitFCmpInst(FCmpInst &FCI) {
  return splitBinary(FCI, FCmpSplitter(FCI));
}

bool Scalarizer::visitBinaryOperator(BinaryOperator &BO) {
  return splitBinary(BO, BinarySplitter(BO));
}

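// Illustrative example (not from the original source): a GEP with vector
// operands such as
//   %p = getelementptr i32, <2 x i32*> %base, <2 x i64> %idx
// is split into
//   %p.i0 = getelementptr i32, i32* %base.i0, i64 %idx.i0
//   %p.i1 = getelementptr i32, i32* %base.i1, i64 %idx.i1
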
bool Scalarizer::visitGetElementPtrInst(GetElementPtrInst &GEPI) {
  VectorType *VT = dyn_cast<VectorType>(GEPI.getType());
  if (!VT)
    return false;

  IRBuilder<> Builder(&GEPI);
  unsigned NumElems = VT->getNumElements();
  unsigned NumIndices = GEPI.getNumIndices();

  Scatterer Base = scatter(&GEPI, GEPI.getOperand(0));

  SmallVector<Scatterer, 8> Ops;
  Ops.resize(NumIndices);
  for (unsigned I = 0; I < NumIndices; ++I)
    Ops[I] = scatter(&GEPI, GEPI.getOperand(I + 1));

  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned I = 0; I < NumElems; ++I) {
    SmallVector<Value *, 8> Indices;
    Indices.resize(NumIndices);
    for (unsigned J = 0; J < NumIndices; ++J)
      Indices[J] = Ops[J][I];
    Res[I] = Builder.CreateGEP(GEPI.getSourceElementType(), Base[I], Indices,
                               GEPI.getName() + ".i" + Twine(I));
    if (GEPI.isInBounds())
      if (GetElementPtrInst *NewGEPI = dyn_cast<GetElementPtrInst>(Res[I]))
        NewGEPI->setIsInBounds();
  }
  gather(&GEPI, Res);
  return true;
}

bool Scalarizer::visitCastInst(CastInst &CI) {
  VectorType *VT = dyn_cast<VectorType>(CI.getDestTy());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&CI);
  Scatterer Op0 = scatter(&CI, CI.getOperand(0));
  assert(Op0.size() == NumElems && "Mismatched cast");
  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned I = 0; I < NumElems; ++I)
    Res[I] = Builder.CreateCast(CI.getOpcode(), Op0[I], VT->getElementType(),
                                CI.getName() + ".i" + Twine(I));
  gather(&CI, Res);
  return true;
}

bool Scalarizer::visitBitCastInst(BitCastInst &BCI) {
  VectorType *DstVT = dyn_cast<VectorType>(BCI.getDestTy());
  VectorType *SrcVT = dyn_cast<VectorType>(BCI.getSrcTy());
  if (!DstVT || !SrcVT)
    return false;

  unsigned DstNumElems = DstVT->getNumElements();
  unsigned SrcNumElems = SrcVT->getNumElements();
  IRBuilder<> Builder(&BCI);
  Scatterer Op0 = scatter(&BCI, BCI.getOperand(0));
  ValueVector Res;
  Res.resize(DstNumElems);

  if (DstNumElems == SrcNumElems) {
    for (unsigned I = 0; I < DstNumElems; ++I)
      Res[I] = Builder.CreateBitCast(Op0[I], DstVT->getElementType(),
                                     BCI.getName() + ".i" + Twine(I));
  } else if (DstNumElems > SrcNumElems) {
    // <M x t1> -> <N*M x t2>. Convert each t1 to <N x t2> and copy the
    // individual elements to the destination.
    unsigned FanOut = DstNumElems / SrcNumElems;
    Type *MidTy = VectorType::get(DstVT->getElementType(), FanOut);
    unsigned ResI = 0;
    for (unsigned Op0I = 0; Op0I < SrcNumElems; ++Op0I) {
      Value *V = Op0[Op0I];
      Instruction *VI;
      // Look through any existing bitcasts before converting to <N x t2>.
      // In the best case, the resulting conversion might be a no-op.
      while ((VI = dyn_cast<Instruction>(V)) &&
             VI->getOpcode() == Instruction::BitCast)
        V = VI->getOperand(0);
      V = Builder.CreateBitCast(V, MidTy, V->getName() + ".cast");
      Scatterer Mid = scatter(&BCI, V);
      for (unsigned MidI = 0; MidI < FanOut; ++MidI)
        Res[ResI++] = Mid[MidI];
    }
  } else {
    // <N*M x t1> -> <M x t2>. Convert each group of <N x t1> into a t2.
    unsigned FanIn = SrcNumElems / DstNumElems;
    Type *MidTy = VectorType::get(SrcVT->getElementType(), FanIn);
    unsigned Op0I = 0;
    for (unsigned ResI = 0; ResI < DstNumElems; ++ResI) {
      Value *V = UndefValue::get(MidTy);
      for (unsigned MidI = 0; MidI < FanIn; ++MidI)
        V = Builder.CreateInsertElement(V, Op0[Op0I++], Builder.getInt32(MidI),
                                        BCI.getName() + ".i" + Twine(ResI)
                                        + ".upto" + Twine(MidI));
      Res[ResI] = Builder.CreateBitCast(V, DstVT->getElementType(),
                                        BCI.getName() + ".i" + Twine(ResI));
    }
  }
  gather(&BCI, Res);
  return true;
}

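// Illustrative example (not from the original source):
//   %s = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 2>
// typically needs no new instructions: the scalarized results simply reuse
// the scattered components, so %s.i0 is %a's element 1 and %s.i1 is %b's
// element 0.
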
bool Scalarizer::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
  VectorType *VT = dyn_cast<VectorType>(SVI.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  Scatterer Op0 = scatter(&SVI, SVI.getOperand(0));
  Scatterer Op1 = scatter(&SVI, SVI.getOperand(1));
  ValueVector Res;
  Res.resize(NumElems);

  for (unsigned I = 0; I < NumElems; ++I) {
    int Selector = SVI.getMaskValue(I);
    if (Selector < 0)
      Res[I] = UndefValue::get(VT->getElementType());
    else if (unsigned(Selector) < Op0.size())
      Res[I] = Op0[Selector];
    else
      Res[I] = Op1[Selector - Op0.size()];
  }
  gather(&SVI, Res);
  return true;
}

bool Scalarizer::visitPHINode(PHINode &PHI) {
  VectorType *VT = dyn_cast<VectorType>(PHI.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&PHI);
  ValueVector Res;
  Res.resize(NumElems);

  unsigned NumOps = PHI.getNumOperands();
  for (unsigned I = 0; I < NumElems; ++I)
    Res[I] = Builder.CreatePHI(VT->getElementType(), NumOps,
                               PHI.getName() + ".i" + Twine(I));

  for (unsigned I = 0; I < NumOps; ++I) {
    Scatterer Op = scatter(&PHI, PHI.getIncomingValue(I));
    BasicBlock *IncomingBlock = PHI.getIncomingBlock(I);
    for (unsigned J = 0; J < NumElems; ++J)
      cast<PHINode>(Res[J])->addIncoming(Op[J], IncomingBlock);
  }
  gather(&PHI, Res);
  return true;
}

bool Scalarizer::visitLoadInst(LoadInst &LI) {
  if (!ScalarizeLoadStore)
    return false;
  if (!LI.isSimple())
    return false;

  VectorLayout Layout;
  if (!getVectorLayout(LI.getType(), LI.getAlignment(), Layout,
                       LI.getModule()->getDataLayout()))
    return false;

  unsigned NumElems = Layout.VecTy->getNumElements();
  IRBuilder<> Builder(&LI);
  Scatterer Ptr = scatter(&LI, LI.getPointerOperand());
  ValueVector Res;
  Res.resize(NumElems);

  for (unsigned I = 0; I < NumElems; ++I)
    Res[I] = Builder.CreateAlignedLoad(Ptr[I], Layout.getElemAlign(I),
                                       LI.getName() + ".i" + Twine(I));
  gather(&LI, Res);
  return true;
}

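// Illustrative example (not from the original source), with
// -scalarize-load-store enabled:
//   %v = load <2 x i32>, <2 x i32>* %p, align 8
// becomes
//   %v.i0 = load i32, i32* %p.i0, align 8
//   %v.i1 = load i32, i32* %p.i1, align 4
// where each element's alignment is the MinAlign of the vector alignment
// and the element's byte offset, per VectorLayout::getElemAlign.
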
bool Scalarizer::visitStoreInst(StoreInst &SI) {
  if (!ScalarizeLoadStore)
    return false;
  if (!SI.isSimple())
    return false;

  VectorLayout Layout;
  Value *FullValue = SI.getValueOperand();
  if (!getVectorLayout(FullValue->getType(), SI.getAlignment(), Layout,
                       SI.getModule()->getDataLayout()))
    return false;

  unsigned NumElems = Layout.VecTy->getNumElements();
  IRBuilder<> Builder(&SI);
  Scatterer Ptr = scatter(&SI, SI.getPointerOperand());
  Scatterer Val = scatter(&SI, FullValue);

  ValueVector Stores;
  Stores.resize(NumElems);
  for (unsigned I = 0; I < NumElems; ++I) {
    unsigned Align = Layout.getElemAlign(I);
    Stores[I] = Builder.CreateAlignedStore(Val[I], Ptr[I], Align);
  }
  transferMetadata(&SI, Stores);
  return true;
}

bool Scalarizer::visitCallInst(CallInst &CI) {
  return splitCall(CI);
}

// Delete the instructions that we scalarized. If a full vector result
// is still needed, recreate it using InsertElements.
bool Scalarizer::finish() {
  // The presence of data in Gathered or Scattered indicates changes
  // made to the Function.
  if (Gathered.empty() && Scattered.empty())
    return false;
  for (const auto &GMI : Gathered) {
    Instruction *Op = GMI.first;
    ValueVector &CV = *GMI.second;
    if (!Op->use_empty()) {
      // The value is still needed, so recreate it using a series of
      // InsertElements.
      Type *Ty = Op->getType();
      Value *Res = UndefValue::get(Ty);
      BasicBlock *BB = Op->getParent();
      unsigned Count = Ty->getVectorNumElements();
      IRBuilder<> Builder(Op);
      if (isa<PHINode>(Op))
        Builder.SetInsertPoint(BB, BB->getFirstInsertionPt());
      for (unsigned I = 0; I < Count; ++I)
        Res = Builder.CreateInsertElement(Res, CV[I], Builder.getInt32(I),
                                          Op->getName() + ".upto" + Twine(I));
      Res->takeName(Op);
      Op->replaceAllUsesWith(Res);
    }
    Op->eraseFromParent();
  }
  Gathered.clear();
  Scattered.clear();
  return true;
}

FunctionPass *llvm::createScalarizerPass() {
  return new Scalarizer();
}
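
// Illustrative usage note (an assumption, not part of the original source):
// with the legacy pass manager, clients would typically schedule this pass
// via something like
//   legacy::PassManager PM;
//   PM.add(createScalarizerPass());
// or run it directly with "opt -scalarizer".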