//===--- BlockGenerators.cpp - Generate code for statements -----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the BlockGenerator and VectorBlockGenerator classes,
// which generate sequential code and vectorized code for a polyhedral
// statement, respectively.
//
//===----------------------------------------------------------------------===//

#include "polly/ScopInfo.h"
#include "isl/aff.h"
#include "isl/set.h"
#include "polly/CodeGen/BlockGenerators.h"
#include "polly/CodeGen/CodeGeneration.h"
#include "polly/Options.h"
#include "polly/Support/GICHelper.h"
#include "polly/Support/SCEVValidator.h"
#include "polly/Support/ScopHelper.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

using namespace llvm;
using namespace polly;

static cl::opt<bool>
Aligned("enable-polly-aligned", cl::desc("Assume aligned memory accesses."),
        cl::Hidden, cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<bool, true>
SCEVCodegenF("polly-codegen-scev", cl::desc("Use SCEV based code generation."),
             cl::Hidden, cl::location(SCEVCodegen), cl::init(false),
             cl::ZeroOrMore, cl::cat(PollyCategory));

bool polly::SCEVCodegen;

bool polly::canSynthesize(const Instruction *I, const llvm::LoopInfo *LI,
                          ScalarEvolution *SE, const Region *R) {
  if (SCEVCodegen) {
    if (!I || !SE->isSCEVable(I->getType()))
      return false;

    if (const SCEV *Scev = SE->getSCEV(const_cast<Instruction *>(I)))
      if (!isa<SCEVCouldNotCompute>(Scev))
        if (!hasScalarDepsInsideRegion(Scev, R))
          return true;

    return false;
  }

  Loop *L = LI->getLoopFor(I->getParent());
  return L && I == L->getCanonicalInductionVariable() && R->contains(L);
}

// Helper class to generate memory locations.
namespace {
class IslGenerator {
public:
  IslGenerator(PollyIRBuilder &Builder, std::vector<Value *> &IVS)
      : Builder(Builder), IVS(IVS) {}
  Value *generateIslVal(__isl_take isl_val *Val);
  Value *generateIslAff(__isl_take isl_aff *Aff);
  Value *generateIslPwAff(__isl_take isl_pw_aff *PwAff);

private:
  typedef struct {
    Value *Result;
    class IslGenerator *Generator;
  } IslGenInfo;

  PollyIRBuilder &Builder;
  std::vector<Value *> &IVS;
  static int mergeIslAffValues(__isl_take isl_set *Set,
                               __isl_take isl_aff *Aff, void *User);
};
}

Value *IslGenerator::generateIslVal(__isl_take isl_val *Val) {
  Value *IntValue = Builder.getInt(APIntFromVal(Val));
  return IntValue;
}

Value *IslGenerator::generateIslAff(__isl_take isl_aff *Aff) {
  Value *Result;
  Value *ConstValue;
  isl_val *Val;

  Val = isl_aff_get_constant_val(Aff);
  ConstValue = generateIslVal(Val);
  Type *Ty = Builder.getInt64Ty();

  // FIXME: We should give the constant and coefficients the right type. Here
  // we force it into i64.
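  // The code below evaluates the affine function as
  //
  //   Result = Constant + Sum_i (Coefficient_i * IVS[i])
  //
  // emitting one mul/add pair per non-zero coefficient.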
  Result = Builder.CreateSExtOrBitCast(ConstValue, Ty);

  unsigned int NbInputDims = isl_aff_dim(Aff, isl_dim_in);

  assert((IVS.size() == NbInputDims) &&
         "The number of induction variables must match the dimension of the "
         "affine space.");

  for (unsigned int i = 0; i < NbInputDims; ++i) {
    Value *CoefficientValue;
    Val = isl_aff_get_coefficient_val(Aff, isl_dim_in, i);

    if (isl_val_is_zero(Val)) {
      isl_val_free(Val);
      continue;
    }

    CoefficientValue = generateIslVal(Val);
    CoefficientValue = Builder.CreateIntCast(CoefficientValue, Ty, true);
    Value *IV = Builder.CreateIntCast(IVS[i], Ty, true);
    Value *PAdd = Builder.CreateMul(CoefficientValue, IV, "p_mul_coeff");
    Result = Builder.CreateAdd(Result, PAdd, "p_sum_coeff");
  }

  isl_aff_free(Aff);

  return Result;
}

int IslGenerator::mergeIslAffValues(__isl_take isl_set *Set,
                                    __isl_take isl_aff *Aff, void *User) {
  IslGenInfo *GenInfo = (IslGenInfo *)User;

  assert((GenInfo->Result == NULL) &&
         "Result is already set. Currently only a single isl_aff is "
         "supported");
  assert(isl_set_plain_is_universe(Set) &&
         "Code generation failed because the set is not the universe");

  GenInfo->Result = GenInfo->Generator->generateIslAff(Aff);

  isl_set_free(Set);
  return 0;
}

Value *IslGenerator::generateIslPwAff(__isl_take isl_pw_aff *PwAff) {
  IslGenInfo User;
  User.Result = NULL;
  User.Generator = this;
  isl_pw_aff_foreach_piece(PwAff, mergeIslAffValues, &User);
  assert(User.Result && "Code generation for isl_pw_aff failed");

  isl_pw_aff_free(PwAff);
  return User.Result;
}

BlockGenerator::BlockGenerator(PollyIRBuilder &B, ScopStmt &Stmt, Pass *P)
    : Builder(B), Statement(Stmt), P(P),
      SE(P->getAnalysis<ScalarEvolution>()) {}

Value *BlockGenerator::lookupAvailableValue(const Value *Old, ValueMapT &BBMap,
                                            ValueMapT &GlobalMap) const {
  // We assume constants never change.
  // This avoids map lookups for many calls to this function.
  if (isa<Constant>(Old))
    return const_cast<Value *>(Old);

  if (Value *New = GlobalMap.lookup(Old)) {
    if (Old->getType()->getScalarSizeInBits() <
        New->getType()->getScalarSizeInBits())
      New = Builder.CreateTruncOrBitCast(New, Old->getType());

    return New;
  }

  // Otherwise it is probably a scop-constant value: a global, a function
  // parameter or an instruction not within the scop.
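  // Such values need not be copied: code generation stays within the
  // original function, so globals, arguments and out-of-region instructions
  // remain directly usable.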
  if (isa<GlobalValue>(Old) || isa<Argument>(Old))
    return const_cast<Value *>(Old);

  if (const Instruction *Inst = dyn_cast<Instruction>(Old))
    if (!Statement.getParent()->getRegion().contains(Inst->getParent()))
      return const_cast<Value *>(Old);

  if (Value *New = BBMap.lookup(Old))
    return New;

  return NULL;
}

Value *BlockGenerator::getNewValue(const Value *Old, ValueMapT &BBMap,
                                   ValueMapT &GlobalMap, LoopToScevMapT &LTS,
                                   Loop *L) {
  if (Value *New = lookupAvailableValue(Old, BBMap, GlobalMap))
    return New;

  if (SCEVCodegen && SE.isSCEVable(Old->getType()))
    if (const SCEV *Scev = SE.getSCEVAtScope(const_cast<Value *>(Old), L)) {
      if (!isa<SCEVCouldNotCompute>(Scev)) {
        const SCEV *NewScev = apply(Scev, LTS, SE);
        ValueToValueMap VTV;
        VTV.insert(BBMap.begin(), BBMap.end());
        VTV.insert(GlobalMap.begin(), GlobalMap.end());
        NewScev = SCEVParameterRewriter::rewrite(NewScev, SE, VTV);
        SCEVExpander Expander(SE, "polly");
        Value *Expanded = Expander.expandCodeFor(NewScev, Old->getType(),
                                                 Builder.GetInsertPoint());

        BBMap[Old] = Expanded;
        return Expanded;
      }
    }

  // At this point the scalar dependence is neither available nor
  // synthesizable via SCEV; this should never happen in the current code
  // generator.
  llvm_unreachable("Unexpected scalar dependence in region!");
  return NULL;
}

void BlockGenerator::copyInstScalar(const Instruction *Inst, ValueMapT &BBMap,
                                    ValueMapT &GlobalMap,
                                    LoopToScevMapT &LTS) {
  // We do not generate debug intrinsics as we did not investigate how to
  // copy them correctly. At the current state, they just crash the code
  // generation as the meta-data operands are not correctly copied.
  if (isa<DbgInfoIntrinsic>(Inst))
    return;

  Instruction *NewInst = Inst->clone();

  // Replace old operands with the new ones.
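  // If an operand cannot be translated, the cloned instruction is dropped;
  // stores must never be dropped, as the assert below enforces.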
  for (Instruction::const_op_iterator OI = Inst->op_begin(),
                                      OE = Inst->op_end();
       OI != OE; ++OI) {
    Value *OldOperand = *OI;
    Value *NewOperand =
        getNewValue(OldOperand, BBMap, GlobalMap, LTS, getLoopForInst(Inst));

    if (!NewOperand) {
      assert(!isa<StoreInst>(NewInst) &&
             "Store instructions are always needed!");
      delete NewInst;
      return;
    }

    NewInst->replaceUsesOfWith(OldOperand, NewOperand);
  }

  Builder.Insert(NewInst);
  BBMap[Inst] = NewInst;

  if (!NewInst->getType()->isVoidTy())
    NewInst->setName("p_" + Inst->getName());
}

std::vector<Value *> BlockGenerator::getMemoryAccessIndex(
    __isl_keep isl_map *AccessRelation, Value *BaseAddress, ValueMapT &BBMap,
    ValueMapT &GlobalMap, LoopToScevMapT &LTS, Loop *L) {
  assert((isl_map_dim(AccessRelation, isl_dim_out) == 1) &&
         "Only single dimensional access functions supported");

  std::vector<Value *> IVS;
  for (unsigned i = 0; i < Statement.getNumIterators(); ++i) {
    const Value *OriginalIV = Statement.getInductionVariableForDimension(i);
    Value *NewIV = getNewValue(OriginalIV, BBMap, GlobalMap, LTS, L);
    IVS.push_back(NewIV);
  }

  isl_pw_aff *PwAff = isl_map_dim_max(isl_map_copy(AccessRelation), 0);
  IslGenerator IslGen(Builder, IVS);
  Value *OffsetValue = IslGen.generateIslPwAff(PwAff);

  Type *Ty = Builder.getInt64Ty();
  OffsetValue = Builder.CreateIntCast(OffsetValue, Ty, true);

  std::vector<Value *> IndexArray;
  Value *NullValue = Constant::getNullValue(Ty);
  IndexArray.push_back(NullValue);
  IndexArray.push_back(OffsetValue);
  return IndexArray;
}

Value *BlockGenerator::getNewAccessOperand(
    __isl_keep isl_map *NewAccessRelation, Value *BaseAddress, ValueMapT &BBMap,
    ValueMapT &GlobalMap, LoopToScevMapT &LTS, Loop *L) {
  std::vector<Value *> IndexArray = getMemoryAccessIndex(
      NewAccessRelation, BaseAddress, BBMap, GlobalMap, LTS, L);
  Value *NewOperand =
      Builder.CreateGEP(BaseAddress, IndexArray, "p_newarrayidx_");
  return NewOperand;
}

Value *BlockGenerator::generateLocationAccessed(const Instruction *Inst,
                                                const Value *Pointer,
                                                ValueMapT &BBMap,
                                                ValueMapT &GlobalMap,
                                                LoopToScevMapT &LTS) {
  const MemoryAccess &Access = Statement.getAccessFor(Inst);
  isl_map *CurrentAccessRelation = Access.getAccessRelation();
  isl_map *NewAccessRelation = Access.getNewAccessRelation();

  assert(isl_map_has_equal_space(CurrentAccessRelation, NewAccessRelation) &&
         "Current and new access function use different spaces");

  Value *NewPointer;

  if (!NewAccessRelation) {
    NewPointer =
        getNewValue(Pointer, BBMap, GlobalMap, LTS, getLoopForInst(Inst));
  } else {
    Value *BaseAddress = const_cast<Value *>(Access.getBaseAddr());
    NewPointer = getNewAccessOperand(NewAccessRelation, BaseAddress, BBMap,
                                     GlobalMap, LTS, getLoopForInst(Inst));
  }

  isl_map_free(CurrentAccessRelation);
  isl_map_free(NewAccessRelation);
  return NewPointer;
}

Loop *BlockGenerator::getLoopForInst(const llvm::Instruction *Inst) {
  return P->getAnalysis<LoopInfo>().getLoopFor(Inst->getParent());
}
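// Generate the new pointer and emit a scalar load. The pointer is translated
// by generateLocationAccessed(), which honors a remapped (new) access
// relation if one has been set for this memory access.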
Value *BlockGenerator::generateScalarLoad(const LoadInst *Load,
                                          ValueMapT &BBMap,
                                          ValueMapT &GlobalMap,
                                          LoopToScevMapT &LTS) {
  const Value *Pointer = Load->getPointerOperand();
  const Instruction *Inst = dyn_cast<Instruction>(Load);
  Value *NewPointer =
      generateLocationAccessed(Inst, Pointer, BBMap, GlobalMap, LTS);
  Value *ScalarLoad =
      Builder.CreateLoad(NewPointer, Load->getName() + "_p_scalar_");
  return ScalarLoad;
}

Value *BlockGenerator::generateScalarStore(const StoreInst *Store,
                                           ValueMapT &BBMap,
                                           ValueMapT &GlobalMap,
                                           LoopToScevMapT &LTS) {
  const Value *Pointer = Store->getPointerOperand();
  Value *NewPointer =
      generateLocationAccessed(Store, Pointer, BBMap, GlobalMap, LTS);
  Value *ValueOperand = getNewValue(Store->getValueOperand(), BBMap, GlobalMap,
                                    LTS, getLoopForInst(Store));

  return Builder.CreateStore(ValueOperand, NewPointer);
}

void BlockGenerator::copyInstruction(const Instruction *Inst, ValueMapT &BBMap,
                                     ValueMapT &GlobalMap,
                                     LoopToScevMapT &LTS) {
  // Terminator instructions control the control flow. They are explicitly
  // expressed in the clast and do not need to be copied.
  if (Inst->isTerminator())
    return;

  if (canSynthesize(Inst, &P->getAnalysis<LoopInfo>(), &SE,
                    &Statement.getParent()->getRegion()))
    return;

  if (const LoadInst *Load = dyn_cast<LoadInst>(Inst)) {
    Value *NewLoad = generateScalarLoad(Load, BBMap, GlobalMap, LTS);
    // Compute NewLoad before its insertion in BBMap to make the insertion
    // deterministic.
    BBMap[Load] = NewLoad;
    return;
  }

  if (const StoreInst *Store = dyn_cast<StoreInst>(Inst)) {
    Value *NewStore = generateScalarStore(Store, BBMap, GlobalMap, LTS);
    // Compute NewStore before its insertion in BBMap to make the insertion
    // deterministic.
    BBMap[Store] = NewStore;
    return;
  }

  copyInstScalar(Inst, BBMap, GlobalMap, LTS);
}
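// Copy the statement's basic block: split the current block at the insert
// point, name the copy "polly.stmt.<original name>" and copy every
// instruction into it.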
void BlockGenerator::copyBB(ValueMapT &GlobalMap, LoopToScevMapT &LTS) {
  BasicBlock *BB = Statement.getBasicBlock();
  BasicBlock *CopyBB =
      SplitBlock(Builder.GetInsertBlock(), Builder.GetInsertPoint(), P);
  CopyBB->setName("polly.stmt." + BB->getName());
  Builder.SetInsertPoint(CopyBB->begin());

  ValueMapT BBMap;

  for (BasicBlock::const_iterator II = BB->begin(), IE = BB->end(); II != IE;
       ++II)
    copyInstruction(II, BBMap, GlobalMap, LTS);
}

VectorBlockGenerator::VectorBlockGenerator(PollyIRBuilder &B,
                                           VectorValueMapT &GlobalMaps,
                                           std::vector<LoopToScevMapT> &VLTS,
                                           ScopStmt &Stmt,
                                           __isl_keep isl_map *Schedule,
                                           Pass *P)
    : BlockGenerator(B, Stmt, P), GlobalMaps(GlobalMaps), VLTS(VLTS),
      Schedule(Schedule) {
  assert(GlobalMaps.size() > 1 && "Only one vector lane found");
  assert(Schedule && "No statement domain provided");
}

Value *VectorBlockGenerator::getVectorValue(const Value *Old,
                                            ValueMapT &VectorMap,
                                            VectorValueMapT &ScalarMaps,
                                            Loop *L) {
  if (Value *NewValue = VectorMap.lookup(Old))
    return NewValue;

  int Width = getVectorWidth();

  Value *Vector = UndefValue::get(VectorType::get(Old->getType(), Width));

  for (int Lane = 0; Lane < Width; Lane++)
    Vector = Builder.CreateInsertElement(
        Vector,
        getNewValue(Old, ScalarMaps[Lane], GlobalMaps[Lane], VLTS[Lane], L),
        Builder.getInt32(Lane));

  VectorMap[Old] = Vector;

  return Vector;
}

Type *VectorBlockGenerator::getVectorPtrTy(const Value *Val, int Width) {
  PointerType *PointerTy = dyn_cast<PointerType>(Val->getType());
  assert(PointerTy && "PointerType expected");

  Type *ScalarType = PointerTy->getElementType();
  VectorType *VectorType = VectorType::get(ScalarType, Width);

  return PointerType::getUnqual(VectorType);
}
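// Load a vector for a stride-one access with a single wide load. As a rough
// sketch (assuming a 4-wide access to an array of doubles), the generated IR
// looks like:
//
//   %vector_ptr  = bitcast double* %p to <4 x double>*
//   %_p_vec_full = load <4 x double>* %vector_ptr, align 8
//
// For NegativeStride (a stride of -1), the pointer of the last lane (the
// lowest address) is used and the loaded vector is reversed with a
// shufflevector.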
Value *
VectorBlockGenerator::generateStrideOneLoad(const LoadInst *Load,
                                            VectorValueMapT &ScalarMaps,
                                            bool NegativeStride = false) {
  unsigned VectorWidth = getVectorWidth();
  const Value *Pointer = Load->getPointerOperand();
  Type *VectorPtrType = getVectorPtrTy(Pointer, VectorWidth);
  unsigned Offset = NegativeStride ? VectorWidth - 1 : 0;

  Value *NewPointer = getNewValue(Pointer, ScalarMaps[Offset],
                                  GlobalMaps[Offset], VLTS[Offset],
                                  getLoopForInst(Load));
  Value *VectorPtr =
      Builder.CreateBitCast(NewPointer, VectorPtrType, "vector_ptr");
  LoadInst *VecLoad =
      Builder.CreateLoad(VectorPtr, Load->getName() + "_p_vec_full");
  if (!Aligned)
    VecLoad->setAlignment(8);

  if (NegativeStride) {
    SmallVector<Constant *, 16> Indices;
    for (int i = VectorWidth - 1; i >= 0; i--)
      Indices.push_back(ConstantInt::get(Builder.getInt32Ty(), i));
    Constant *SV = llvm::ConstantVector::get(Indices);
    Value *RevVecLoad = Builder.CreateShuffleVector(
        VecLoad, VecLoad, SV, Load->getName() + "_reverse");
    return RevVecLoad;
  }

  return VecLoad;
}

Value *VectorBlockGenerator::generateStrideZeroLoad(const LoadInst *Load,
                                                    ValueMapT &BBMap) {
  const Value *Pointer = Load->getPointerOperand();
  Type *VectorPtrType = getVectorPtrTy(Pointer, 1);
  Value *NewPointer =
      getNewValue(Pointer, BBMap, GlobalMaps[0], VLTS[0], getLoopForInst(Load));
  Value *VectorPtr = Builder.CreateBitCast(NewPointer, VectorPtrType,
                                           Load->getName() + "_p_vec_p");
  LoadInst *ScalarLoad =
      Builder.CreateLoad(VectorPtr, Load->getName() + "_p_splat_one");

  if (!Aligned)
    ScalarLoad->setAlignment(8);

  Constant *SplatVector = Constant::getNullValue(
      VectorType::get(Builder.getInt32Ty(), getVectorWidth()));

  Value *VectorLoad = Builder.CreateShuffleVector(
      ScalarLoad, ScalarLoad, SplatVector, Load->getName() + "_p_splat");
  return VectorLoad;
}

Value *
VectorBlockGenerator::generateUnknownStrideLoad(const LoadInst *Load,
                                                VectorValueMapT &ScalarMaps) {
  int VectorWidth = getVectorWidth();
  const Value *Pointer = Load->getPointerOperand();
  VectorType *VectorType = VectorType::get(
      dyn_cast<PointerType>(Pointer->getType())->getElementType(), VectorWidth);

  Value *Vector = UndefValue::get(VectorType);

  for (int i = 0; i < VectorWidth; i++) {
    Value *NewPointer = getNewValue(Pointer, ScalarMaps[i], GlobalMaps[i],
                                    VLTS[i], getLoopForInst(Load));
    Value *ScalarLoad =
        Builder.CreateLoad(NewPointer, Load->getName() + "_p_scalar_");
    Vector = Builder.CreateInsertElement(
        Vector, ScalarLoad, Builder.getInt32(i), Load->getName() + "_p_vec_");
  }

  return Vector;
}

void VectorBlockGenerator::generateLoad(const LoadInst *Load,
                                        ValueMapT &VectorMap,
                                        VectorValueMapT &ScalarMaps) {
  if (PollyVectorizerChoice >= VECTORIZER_FIRST_NEED_GROUPED_UNROLL ||
      !VectorType::isValidElementType(Load->getType())) {
    for (int i = 0; i < getVectorWidth(); i++)
      ScalarMaps[i][Load] =
          generateScalarLoad(Load, ScalarMaps[i], GlobalMaps[i], VLTS[i]);
    return;
  }

  const MemoryAccess &Access = Statement.getAccessFor(Load);

  Value *NewLoad;
  if (Access.isStrideZero(isl_map_copy(Schedule)))
    NewLoad = generateStrideZeroLoad(Load, ScalarMaps[0]);
  else if (Access.isStrideOne(isl_map_copy(Schedule)))
    NewLoad = generateStrideOneLoad(Load, ScalarMaps);
  else if (Access.isStrideX(isl_map_copy(Schedule), -1))
    NewLoad = generateStrideOneLoad(Load, ScalarMaps, true);
  else
    NewLoad = generateUnknownStrideLoad(Load, ScalarMaps);

  VectorMap[Load] = NewLoad;
}
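// Vectorize a unary instruction. Only casts are expected here (enforced by
// the assert below); the whole operand vector is cast to the vectorized
// destination type in one instruction.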
void VectorBlockGenerator::copyUnaryInst(const UnaryInstruction *Inst,
                                         ValueMapT &VectorMap,
                                         VectorValueMapT &ScalarMaps) {
  int VectorWidth = getVectorWidth();
  Value *NewOperand = getVectorValue(Inst->getOperand(0), VectorMap,
                                     ScalarMaps, getLoopForInst(Inst));

  assert(isa<CastInst>(Inst) && "Cannot generate vector code for instruction");

  const CastInst *Cast = dyn_cast<CastInst>(Inst);
  VectorType *DestType = VectorType::get(Inst->getType(), VectorWidth);
  VectorMap[Inst] = Builder.CreateCast(Cast->getOpcode(), NewOperand, DestType);
}

void VectorBlockGenerator::copyBinaryInst(const BinaryOperator *Inst,
                                          ValueMapT &VectorMap,
                                          VectorValueMapT &ScalarMaps) {
  Loop *L = getLoopForInst(Inst);
  Value *OpZero = Inst->getOperand(0);
  Value *OpOne = Inst->getOperand(1);

  Value *NewOpZero, *NewOpOne;
  NewOpZero = getVectorValue(OpZero, VectorMap, ScalarMaps, L);
  NewOpOne = getVectorValue(OpOne, VectorMap, ScalarMaps, L);

  Value *NewInst = Builder.CreateBinOp(Inst->getOpcode(), NewOpZero, NewOpOne,
                                       Inst->getName() + "p_vec");
  VectorMap[Inst] = NewInst;
}

void VectorBlockGenerator::copyStore(const StoreInst *Store,
                                     ValueMapT &VectorMap,
                                     VectorValueMapT &ScalarMaps) {
  int VectorWidth = getVectorWidth();

  const MemoryAccess &Access = Statement.getAccessFor(Store);

  const Value *Pointer = Store->getPointerOperand();
  Value *Vector = getVectorValue(Store->getValueOperand(), VectorMap,
                                 ScalarMaps, getLoopForInst(Store));

  if (Access.isStrideOne(isl_map_copy(Schedule))) {
    Type *VectorPtrType = getVectorPtrTy(Pointer, VectorWidth);
    Value *NewPointer = getNewValue(Pointer, ScalarMaps[0], GlobalMaps[0],
                                    VLTS[0], getLoopForInst(Store));

    Value *VectorPtr =
        Builder.CreateBitCast(NewPointer, VectorPtrType, "vector_ptr");
    StoreInst *NewStore = Builder.CreateStore(Vector, VectorPtr);

    if (!Aligned)
      NewStore->setAlignment(8);
  } else {
    for (unsigned i = 0; i < ScalarMaps.size(); i++) {
      Value *Scalar = Builder.CreateExtractElement(Vector, Builder.getInt32(i));
      Value *NewPointer = getNewValue(Pointer, ScalarMaps[i], GlobalMaps[i],
                                      VLTS[i], getLoopForInst(Store));
      Builder.CreateStore(Scalar, NewPointer);
    }
  }
}

bool VectorBlockGenerator::hasVectorOperands(const Instruction *Inst,
                                             ValueMapT &VectorMap) {
  for (Instruction::const_op_iterator OI = Inst->op_begin(),
                                      OE = Inst->op_end();
       OI != OE; ++OI)
    if (VectorMap.count(*OI))
      return true;
  return false;
}
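// Extract the scalar lanes of all vector operands of Inst into the per-lane
// scalar maps. Roughly, for each vector operand %v and each lane i this
// emits:
//
//   %scalar_i = extractelement <Width x Ty> %v, i32 i
//
// Returns true iff at least one operand was available as a vector.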
bool VectorBlockGenerator::extractScalarValues(const Instruction *Inst,
                                               ValueMapT &VectorMap,
                                               VectorValueMapT &ScalarMaps) {
  bool HasVectorOperand = false;
  int VectorWidth = getVectorWidth();

  for (Instruction::const_op_iterator OI = Inst->op_begin(),
                                      OE = Inst->op_end();
       OI != OE; ++OI) {
    ValueMapT::iterator VecOp = VectorMap.find(*OI);

    if (VecOp == VectorMap.end())
      continue;

    HasVectorOperand = true;
    Value *NewVector = VecOp->second;

    for (int i = 0; i < VectorWidth; ++i) {
      ValueMapT &SM = ScalarMaps[i];

      // If this scalar has already been extracted for one lane, it has been
      // extracted for all lanes, so there is no need to check the existence
      // of the remaining ones.
      if (SM.count(*OI))
        break;

      SM[*OI] = Builder.CreateExtractElement(NewVector, Builder.getInt32(i));
    }
  }

  return HasVectorOperand;
}

void VectorBlockGenerator::copyInstScalarized(const Instruction *Inst,
                                              ValueMapT &VectorMap,
                                              VectorValueMapT &ScalarMaps) {
  bool HasVectorOperand;
  int VectorWidth = getVectorWidth();

  HasVectorOperand = extractScalarValues(Inst, VectorMap, ScalarMaps);

  for (int VectorLane = 0; VectorLane < getVectorWidth(); VectorLane++)
    copyInstScalar(Inst, ScalarMaps[VectorLane], GlobalMaps[VectorLane],
                   VLTS[VectorLane]);

  if (!VectorType::isValidElementType(Inst->getType()) || !HasVectorOperand)
    return;

  // Make the result available as a vector value.
  VectorType *VectorType = VectorType::get(Inst->getType(), VectorWidth);
  Value *Vector = UndefValue::get(VectorType);

  for (int i = 0; i < VectorWidth; i++)
    Vector = Builder.CreateInsertElement(Vector, ScalarMaps[i][Inst],
                                         Builder.getInt32(i));

  VectorMap[Inst] = Vector;
}

int VectorBlockGenerator::getVectorWidth() { return GlobalMaps.size(); }

void VectorBlockGenerator::copyInstruction(const Instruction *Inst,
                                           ValueMapT &VectorMap,
                                           VectorValueMapT &ScalarMaps) {
  // Terminator instructions control the control flow. They are explicitly
  // expressed in the clast and do not need to be copied.
  if (Inst->isTerminator())
    return;

  if (canSynthesize(Inst, &P->getAnalysis<LoopInfo>(), &SE,
                    &Statement.getParent()->getRegion()))
    return;

  if (const LoadInst *Load = dyn_cast<LoadInst>(Inst)) {
    generateLoad(Load, VectorMap, ScalarMaps);
    return;
  }

  if (hasVectorOperands(Inst, VectorMap)) {
    if (const StoreInst *Store = dyn_cast<StoreInst>(Inst)) {
      copyStore(Store, VectorMap, ScalarMaps);
      return;
    }

    if (const UnaryInstruction *Unary = dyn_cast<UnaryInstruction>(Inst)) {
      copyUnaryInst(Unary, VectorMap, ScalarMaps);
      return;
    }

    if (const BinaryOperator *Binary = dyn_cast<BinaryOperator>(Inst)) {
      copyBinaryInst(Binary, VectorMap, ScalarMaps);
      return;
    }

    // Fallthrough: we generate scalar instructions if we do not know how to
    // generate vector code.
  }

  copyInstScalarized(Inst, VectorMap, ScalarMaps);
}

void VectorBlockGenerator::copyBB() {
  BasicBlock *BB = Statement.getBasicBlock();
  BasicBlock *CopyBB =
      SplitBlock(Builder.GetInsertBlock(), Builder.GetInsertPoint(), P);
  CopyBB->setName("polly.stmt." + BB->getName());
  Builder.SetInsertPoint(CopyBB->begin());

  // Create two maps that store the mapping from the original instructions of
  // the old basic block to their copies in the new basic block. Those maps
  // are basic block local.
  //
  // As vector code generation is supported, there is one map for scalar
  // values and one for vector values.
  //
  // In case we just do scalar code generation, the vectorMap is not used and
  // the scalarMap has just one dimension, which contains the mapping.
  //
  // In case vector code generation is done, an instruction may either appear
  // once in the vector map (as it computes <vectorwidth> values at a time),
  // or, if the values are computed using scalar operations, it appears once
  // in every dimension of the scalarMap.
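  // For example, at vector width four a vectorized add is entered once into
  // VectorBlockMap, whereas a scalarized call is entered once into each of
  // the four dimensions of ScalarBlockMap.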
  VectorValueMapT ScalarBlockMap(getVectorWidth());
  ValueMapT VectorBlockMap;

  for (BasicBlock::const_iterator II = BB->begin(), IE = BB->end(); II != IE;
       ++II)
    copyInstruction(II, VectorBlockMap, ScalarBlockMap);
}