//===-- PPCTargetTransformInfo.cpp - PPC specific TTI ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "PPCTargetTransformInfo.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
using namespace llvm;

#define DEBUG_TYPE "ppctti"

static cl::opt<bool> DisablePPCConstHoist("disable-ppc-constant-hoisting",
    cl::desc("disable constant hoisting on PPC"), cl::init(false), cl::Hidden);

// This is currently only used for the data prefetch pass which is only enabled
// for BG/Q by default.
static cl::opt<unsigned>
CacheLineSize("ppc-loop-prefetch-cache-line", cl::Hidden, cl::init(64),
              cl::desc("The loop prefetch cache line size"));

static cl::opt<bool>
EnablePPCColdCC("ppc-enable-coldcc", cl::Hidden, cl::init(false),
                cl::desc("Enable using coldcc calling conv for cold "
                         "internal functions"));

// The latency of mtctr is only justified if there are more than 4
// comparisons that will be removed as a result.
static cl::opt<unsigned>
SmallCTRLoopThreshold("min-ctr-loop-threshold", cl::init(4), cl::Hidden,
                      cl::desc("Loops with a constant trip count smaller than "
                               "this value will not use the count register."));

//===----------------------------------------------------------------------===//
//
// PPC cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
PPCTTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (ST->hasPOPCNTD() != PPCSubtarget::POPCNTD_Unavailable && TyWidth <= 64)
    return ST->hasPOPCNTD() == PPCSubtarget::POPCNTD_Slow ?
             TTI::PSK_SlowHardware : TTI::PSK_FastHardware;
  return TTI::PSK_Software;
}

int PPCTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  if (Imm == 0)
    return TTI::TCC_Free;

  if (Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Basic;

    if (isInt<32>(Imm.getSExtValue())) {
      // A constant that can be materialized using lis.
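      // For example, 0x12340000 can be built with a single lis, while a
      // value like 0x12345678 needs lis followed by ori, hence the
      // 2 * TCC_Basic below.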
      if ((Imm.getZExtValue() & 0xFFFF) == 0)
        return TTI::TCC_Basic;

      return 2 * TTI::TCC_Basic;
    }
  }

  return 4 * TTI::TCC_Basic;
}

int PPCTTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(IID, Idx, Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return PPCTTIImpl::getIntImmCost(Imm, Ty);
}

int PPCTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(Opcode, Idx, Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  unsigned ImmIdx = ~0U;
  bool ShiftedFree = false, RunFree = false, UnsignedFree = false,
       ZeroFree = false;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::And:
    RunFree = true; // (for the rotate-and-mask instructions)
    LLVM_FALLTHROUGH;
  case Instruction::Add:
  case Instruction::Or:
  case Instruction::Xor:
    ShiftedFree = true;
    LLVM_FALLTHROUGH;
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    ImmIdx = 1;
    break;
  case Instruction::ICmp:
    UnsignedFree = true;
    ImmIdx = 1;
    // Zero comparisons can use record-form instructions.
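    // (e.g. a record-form operation such as andi. already sets CR0 as if its
    // result were compared against zero, so an explicit compare with 0 is
    // typically free.)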
    LLVM_FALLTHROUGH;
  case Instruction::Select:
    ZeroFree = true;
    break;
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Ret:
  case Instruction::Load:
  case Instruction::Store:
    break;
  }

  if (ZeroFree && Imm == 0)
    return TTI::TCC_Free;

  if (Idx == ImmIdx && Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;

    if (RunFree) {
      if (Imm.getBitWidth() <= 32 &&
          (isShiftedMask_32(Imm.getZExtValue()) ||
           isShiftedMask_32(~Imm.getZExtValue())))
        return TTI::TCC_Free;

      if (ST->isPPC64() &&
          (isShiftedMask_64(Imm.getZExtValue()) ||
           isShiftedMask_64(~Imm.getZExtValue())))
        return TTI::TCC_Free;
    }

    if (UnsignedFree && isUInt<16>(Imm.getZExtValue()))
      return TTI::TCC_Free;

    if (ShiftedFree && (Imm.getZExtValue() & 0xFFFF) == 0)
      return TTI::TCC_Free;
  }

  return PPCTTIImpl::getIntImmCost(Imm, Ty);
}

unsigned PPCTTIImpl::getUserCost(const User *U,
                                 ArrayRef<const Value *> Operands) {
  if (U->getType()->isVectorTy()) {
    // Instructions that need to be split should cost more.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, U->getType());
    return LT.first * BaseT::getUserCost(U, Operands);
  }

  return BaseT::getUserCost(U, Operands);
}

bool PPCTTIImpl::mightUseCTR(BasicBlock *BB,
                             TargetLibraryInfo *LibInfo) {
  const PPCTargetMachine &TM = ST->getTargetMachine();

  // Loop through the inline asm constraints and look for something that
  // clobbers ctr.
  auto asmClobbersCTR = [](InlineAsm *IA) {
    InlineAsm::ConstraintInfoVector CIV = IA->ParseConstraints();
    for (unsigned i = 0, ie = CIV.size(); i < ie; ++i) {
      InlineAsm::ConstraintInfo &C = CIV[i];
      if (C.Type != InlineAsm::isInput)
        for (unsigned j = 0, je = C.Codes.size(); j < je; ++j)
          if (StringRef(C.Codes[j]).equals_lower("{ctr}"))
            return true;
    }
    return false;
  };

  // Determining the address of a TLS variable results in a function call in
  // certain TLS models.
  std::function<bool(const Value *)> memAddrUsesCTR =
      [&memAddrUsesCTR, &TM](const Value *MemAddr) -> bool {
    const auto *GV = dyn_cast<GlobalValue>(MemAddr);
    if (!GV) {
      // Recurse to check for constants that refer to TLS global variables.
      if (const auto *CV = dyn_cast<Constant>(MemAddr))
        for (const auto &CO : CV->operands())
          if (memAddrUsesCTR(CO))
            return true;

      return false;
    }

    if (!GV->isThreadLocal())
      return false;
    TLSModel::Model Model = TM.getTLSModel(GV);
    return Model == TLSModel::GeneralDynamic ||
           Model == TLSModel::LocalDynamic;
  };

  auto isLargeIntegerTy = [](bool Is32Bit, Type *Ty) {
    if (IntegerType *ITy = dyn_cast<IntegerType>(Ty))
      return ITy->getBitWidth() > (Is32Bit ? 32U : 64U);

    return false;
  };

  for (BasicBlock::iterator J = BB->begin(), JE = BB->end();
       J != JE; ++J) {
    if (CallInst *CI = dyn_cast<CallInst>(J)) {
      // Inline ASM is okay, unless it clobbers the ctr register.
      if (InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue())) {
        if (asmClobbersCTR(IA))
          return true;
        continue;
      }

      if (Function *F = CI->getCalledFunction()) {
        // Most intrinsics don't become function calls, but some might.
        // sin, cos, exp and log are always calls.
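        // Opcode, when set below, names the ISD node used to ask the target
        // whether the operation is legal (and so will not become a call).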
        unsigned Opcode = 0;
        if (F->getIntrinsicID() != Intrinsic::not_intrinsic) {
          switch (F->getIntrinsicID()) {
          default: continue;
          // If we have a call to set_loop_iterations or loop_decrement,
          // we're definitely using CTR.
          case Intrinsic::set_loop_iterations:
          case Intrinsic::loop_decrement:
            return true;

// VisualStudio defines setjmp as _setjmp
#if defined(_MSC_VER) && defined(setjmp) && \
    !defined(setjmp_undefined_for_msvc)
#  pragma push_macro("setjmp")
#  undef setjmp
#  define setjmp_undefined_for_msvc
#endif

          case Intrinsic::setjmp:

#if defined(_MSC_VER) && defined(setjmp_undefined_for_msvc)
// let's return it to _setjmp state
#  pragma pop_macro("setjmp")
#  undef setjmp_undefined_for_msvc
#endif

          case Intrinsic::longjmp:

          // Exclude eh_sjlj_setjmp; we don't need to exclude eh_sjlj_longjmp
          // because, although it does clobber the counter register, control
          // can't then return to inside the loop unless there is also an
          // eh_sjlj_setjmp.
          case Intrinsic::eh_sjlj_setjmp:

          case Intrinsic::memcpy:
          case Intrinsic::memmove:
          case Intrinsic::memset:
          case Intrinsic::powi:
          case Intrinsic::log:
          case Intrinsic::log2:
          case Intrinsic::log10:
          case Intrinsic::exp:
          case Intrinsic::exp2:
          case Intrinsic::pow:
          case Intrinsic::sin:
          case Intrinsic::cos:
            return true;
          case Intrinsic::copysign:
            if (CI->getArgOperand(0)->getType()->getScalarType()->
                isPPC_FP128Ty())
              return true;
            else
              continue; // ISD::FCOPYSIGN is never a library call.
          case Intrinsic::sqrt:      Opcode = ISD::FSQRT;      break;
          case Intrinsic::floor:     Opcode = ISD::FFLOOR;     break;
          case Intrinsic::ceil:      Opcode = ISD::FCEIL;      break;
          case Intrinsic::trunc:     Opcode = ISD::FTRUNC;     break;
          case Intrinsic::rint:      Opcode = ISD::FRINT;      break;
          case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
          case Intrinsic::round:     Opcode = ISD::FROUND;     break;
          case Intrinsic::minnum:    Opcode = ISD::FMINNUM;    break;
          case Intrinsic::maxnum:    Opcode = ISD::FMAXNUM;    break;
          case Intrinsic::umul_with_overflow: Opcode = ISD::UMULO; break;
          case Intrinsic::smul_with_overflow: Opcode = ISD::SMULO; break;
          }
        }

        // PowerPC does not use [US]DIVREM or other library calls for
        // operations on regular types which are not otherwise library calls
        // (i.e. soft float or atomics). If adapting for targets that do,
        // additional care is required here.

        LibFunc Func;
        if (!F->hasLocalLinkage() && F->hasName() && LibInfo &&
            LibInfo->getLibFunc(F->getName(), Func) &&
            LibInfo->hasOptimizedCodeGen(Func)) {
          // Non-read-only functions are never treated as intrinsics.
          if (!CI->onlyReadsMemory())
            return true;

          // Conversion happens only for FP calls.
          if (!CI->getArgOperand(0)->getType()->isFloatingPointTy())
            return true;

          switch (Func) {
          default: return true;
          case LibFunc_copysign:
          case LibFunc_copysignf:
            continue; // ISD::FCOPYSIGN is never a library call.
          case LibFunc_copysignl:
            return true;
          case LibFunc_fabs:
          case LibFunc_fabsf:
          case LibFunc_fabsl:
            continue; // ISD::FABS is never a library call.
          case LibFunc_sqrt:
          case LibFunc_sqrtf:
          case LibFunc_sqrtl:
            Opcode = ISD::FSQRT; break;
          case LibFunc_floor:
          case LibFunc_floorf:
          case LibFunc_floorl:
            Opcode = ISD::FFLOOR; break;
          case LibFunc_nearbyint:
          case LibFunc_nearbyintf:
          case LibFunc_nearbyintl:
            Opcode = ISD::FNEARBYINT; break;
          case LibFunc_ceil:
          case LibFunc_ceilf:
          case LibFunc_ceill:
            Opcode = ISD::FCEIL; break;
          case LibFunc_rint:
          case LibFunc_rintf:
          case LibFunc_rintl:
            Opcode = ISD::FRINT; break;
          case LibFunc_round:
          case LibFunc_roundf:
          case LibFunc_roundl:
            Opcode = ISD::FROUND; break;
          case LibFunc_trunc:
          case LibFunc_truncf:
          case LibFunc_truncl:
            Opcode = ISD::FTRUNC; break;
          case LibFunc_fmin:
          case LibFunc_fminf:
          case LibFunc_fminl:
            Opcode = ISD::FMINNUM; break;
          case LibFunc_fmax:
          case LibFunc_fmaxf:
          case LibFunc_fmaxl:
            Opcode = ISD::FMAXNUM; break;
          }
        }

        if (Opcode) {
          EVT EVTy =
              TLI->getValueType(DL, CI->getArgOperand(0)->getType(), true);

          if (EVTy == MVT::Other)
            return true;

          if (TLI->isOperationLegalOrCustom(Opcode, EVTy))
            continue;
          else if (EVTy.isVector() &&
                   TLI->isOperationLegalOrCustom(Opcode, EVTy.getScalarType()))
            continue;

          return true;
        }
      }

      return true;
    } else if (isa<BinaryOperator>(J) &&
               J->getType()->getScalarType()->isPPC_FP128Ty()) {
      // Most operations on ppc_fp128 values become calls.
      return true;
    } else if (isa<UIToFPInst>(J) || isa<SIToFPInst>(J) ||
               isa<FPToUIInst>(J) || isa<FPToSIInst>(J)) {
      CastInst *CI = cast<CastInst>(J);
      if (CI->getSrcTy()->getScalarType()->isPPC_FP128Ty() ||
          CI->getDestTy()->getScalarType()->isPPC_FP128Ty() ||
          isLargeIntegerTy(!TM.isPPC64(), CI->getSrcTy()->getScalarType()) ||
          isLargeIntegerTy(!TM.isPPC64(), CI->getDestTy()->getScalarType()))
        return true;
    } else if (isLargeIntegerTy(!TM.isPPC64(),
                                J->getType()->getScalarType()) &&
               (J->getOpcode() == Instruction::UDiv ||
                J->getOpcode() == Instruction::SDiv ||
                J->getOpcode() == Instruction::URem ||
                J->getOpcode() == Instruction::SRem)) {
      return true;
    } else if (!TM.isPPC64() &&
               isLargeIntegerTy(false, J->getType()->getScalarType()) &&
               (J->getOpcode() == Instruction::Shl ||
                J->getOpcode() == Instruction::AShr ||
                J->getOpcode() == Instruction::LShr)) {
      // Only on PPC32, for 128-bit integers (specifically not 64-bit
      // integers), these might be runtime calls.
      return true;
    } else if (isa<IndirectBrInst>(J) || isa<InvokeInst>(J)) {
      // On PowerPC, indirect jumps use the counter register.
      return true;
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(J)) {
      if (SI->getNumCases() + 1 >= (unsigned)TLI->getMinimumJumpTableEntries())
        return true;
    }

    // FREM is always a call.
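    // (There is no frem instruction on PPC; FRem becomes a libcall such as
    // fmod, and a call means we cannot keep the loop count in the CTR.)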
    if (J->getOpcode() == Instruction::FRem)
      return true;

    if (ST->useSoftFloat()) {
      switch (J->getOpcode()) {
      case Instruction::FAdd:
      case Instruction::FSub:
      case Instruction::FMul:
      case Instruction::FDiv:
      case Instruction::FPTrunc:
      case Instruction::FPExt:
      case Instruction::FPToUI:
      case Instruction::FPToSI:
      case Instruction::UIToFP:
      case Instruction::SIToFP:
      case Instruction::FCmp:
        return true;
      }
    }

    for (Value *Operand : J->operands())
      if (memAddrUsesCTR(Operand))
        return true;
  }

  return false;
}

bool PPCTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                          AssumptionCache &AC,
                                          TargetLibraryInfo *LibInfo,
                                          TTI::HardwareLoopInfo &HWLoopInfo) {
  const PPCTargetMachine &TM = ST->getTargetMachine();
  TargetSchedModel SchedModel;
  SchedModel.init(ST);

  // Do not convert small loops with a short constant trip count into CTR
  // loops.
  unsigned ConstTripCount = SE.getSmallConstantTripCount(L);
  if (ConstTripCount && ConstTripCount < SmallCTRLoopThreshold) {
    SmallPtrSet<const Value *, 32> EphValues;
    CodeMetrics::collectEphemeralValues(L, &AC, EphValues);
    CodeMetrics Metrics;
    for (BasicBlock *BB : L->blocks())
      Metrics.analyzeBasicBlock(BB, *this, EphValues);
    // 6 is an approximate latency for the mtctr instruction.
    if (Metrics.NumInsts <= (6 * SchedModel.getIssueWidth()))
      return false;
  }

  // We don't want to spill/restore the counter register, and so we don't
  // want to use the counter register if the loop contains calls.
  for (Loop::block_iterator I = L->block_begin(), IE = L->block_end();
       I != IE; ++I)
    if (mightUseCTR(*I, LibInfo))
      return false;

  SmallVector<BasicBlock *, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  // If there is an exit edge known to be frequently taken,
  // we should not transform this loop.
  for (auto &BB : ExitingBlocks) {
    Instruction *TI = BB->getTerminator();
    if (!TI) continue;

    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      uint64_t TrueWeight = 0, FalseWeight = 0;
      if (!BI->isConditional() ||
          !BI->extractProfMetadata(TrueWeight, FalseWeight))
        continue;

      // If the exit path is more frequent than the loop path,
      // we return here without further analysis for this loop.
      bool TrueIsExit = !L->contains(BI->getSuccessor(0));
      if (( TrueIsExit && FalseWeight < TrueWeight) ||
          (!TrueIsExit && FalseWeight > TrueWeight))
        return false;
    }
  }

  LLVMContext &C = L->getHeader()->getContext();
  HWLoopInfo.CountType = TM.isPPC64() ?
    Type::getInt64Ty(C) : Type::getInt32Ty(C);
  HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1);
  return true;
}

void PPCTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP) {
  if (ST->getDarwinDirective() == PPC::DIR_A2) {
    // The A2 is in-order with a deep pipeline, and concatenation unrolling
    // helps expose latency-hiding opportunities to the instruction scheduler.
    UP.Partial = UP.Runtime = true;

    // We unroll a lot on the A2 (hundreds of instructions), and the benefits
    // often outweigh the cost of a division to compute the trip count.
    UP.AllowExpensiveTripCount = true;
  }

  BaseT::getUnrollingPreferences(L, SE, UP);
}

// This function returns true to allow using coldcc calling convention.
// Returning true results in coldcc being used for functions which are cold at
// all call sites when the callers of the functions are not calling any other
// non-coldcc functions.
bool PPCTTIImpl::useColdCCForColdCall(Function &F) {
  return EnablePPCColdCC;
}

bool PPCTTIImpl::enableAggressiveInterleaving(bool LoopHasReductions) {
  // On the A2, always unroll aggressively. For QPX unaligned loads, we depend
  // on combining the loads generated for consecutive accesses, and failure to
  // do so is particularly expensive. Aggressive interleaved unrolling makes
  // such combining much more likely than concatenation unrolling alone.
  if (ST->getDarwinDirective() == PPC::DIR_A2)
    return true;

  return LoopHasReductions;
}

const PPCTTIImpl::TTI::MemCmpExpansionOptions *
PPCTTIImpl::enableMemCmpExpansion(bool IsZeroCmp) const {
  static const auto Options = []() {
    TTI::MemCmpExpansionOptions Options;
    Options.LoadSizes.push_back(8);
    Options.LoadSizes.push_back(4);
    Options.LoadSizes.push_back(2);
    Options.LoadSizes.push_back(1);
    return Options;
  }();
  return &Options;
}

bool PPCTTIImpl::enableInterleavedAccessVectorization() {
  return true;
}

unsigned PPCTTIImpl::getNumberOfRegisters(bool Vector) {
  if (Vector && !ST->hasAltivec() && !ST->hasQPX())
    return 0;
  return ST->hasVSX() ? 64 : 32;
}

unsigned PPCTTIImpl::getRegisterBitWidth(bool Vector) const {
  if (Vector) {
    if (ST->hasQPX()) return 256;
    if (ST->hasAltivec()) return 128;
    return 0;
  }

  if (ST->isPPC64())
    return 64;
  return 32;
}

unsigned PPCTTIImpl::getCacheLineSize() {
  // Check first if the user specified a custom line size.
  if (CacheLineSize.getNumOccurrences() > 0)
    return CacheLineSize;

  // On P7, P8 or P9 we have a cache line size of 128.
  unsigned Directive = ST->getDarwinDirective();
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
      Directive == PPC::DIR_PWR9)
    return 128;

  // On other processors return a default of 64 bytes.
  return 64;
}

unsigned PPCTTIImpl::getPrefetchDistance() {
  // This seems like a reasonable default for the BG/Q (this pass is enabled,
  // by default, only on the BG/Q).
  return 300;
}

unsigned PPCTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  unsigned Directive = ST->getDarwinDirective();
  // The 440 has no SIMD support, but floating-point instructions
  // have a 5-cycle latency, so unroll by 5x for latency hiding.
  if (Directive == PPC::DIR_440)
    return 5;

  // The A2 has no SIMD support, but floating-point instructions
  // have a 6-cycle latency, so unroll by 6x for latency hiding.
  if (Directive == PPC::DIR_A2)
    return 6;

  // FIXME: For lack of any better information, do no harm...
  if (Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500)
    return 1;

  // For P7 and P8, floating-point instructions have a 6-cycle latency and
  // there are two execution units, so unroll by 12x for latency hiding.
  // FIXME: Treat P9 the same as the previous generation until POWER9
  // scheduling is ready.
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
      Directive == PPC::DIR_PWR9)
    return 12;

  // For most things, modern systems have two execution units (and
  // out-of-order execution).
  return 2;
}

// Adjust the cost of vector instructions on targets on which there is overlap
// between the vector and scalar units, thereby reducing the overall throughput
// of vector code relative to scalar code.
int PPCTTIImpl::vectorCostAdjustment(int Cost, unsigned Opcode, Type *Ty1,
                                     Type *Ty2) {
  if (!ST->vectorsUseTwoUnits() || !Ty1->isVectorTy())
    return Cost;

  std::pair<int, MVT> LT1 = TLI->getTypeLegalizationCost(DL, Ty1);
  // If type legalization involves splitting the vector, we don't want to
  // double the cost at every step - only the last step.
  if (LT1.first != 1 || !LT1.second.isVector())
    return Cost;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  if (TLI->isOperationExpand(ISD, LT1.second))
    return Cost;

  if (Ty2) {
    std::pair<int, MVT> LT2 = TLI->getTypeLegalizationCost(DL, Ty2);
    if (LT2.first != 1 || !LT2.second.isVector())
      return Cost;
  }

  return Cost * 2;
}

int PPCTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  // Fallback to the default implementation.
  int Cost = BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info,
                                           Opd1PropInfo, Opd2PropInfo);
  return vectorCostAdjustment(Cost, Opcode, Ty, nullptr);
}

int PPCTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

  // PPC, for both Altivec/VSX and QPX, supports cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). We need one such shuffle instruction for each actual
  // register (this is not true for arbitrary shuffles, but is true for the
  // structured types of shuffles covered by TTI::ShuffleKind).
  return vectorCostAdjustment(LT.first, Instruction::ShuffleVector, Tp,
                              nullptr);
}

int PPCTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                 const Instruction *I) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  int Cost = BaseT::getCastInstrCost(Opcode, Dst, Src);
  return vectorCostAdjustment(Cost, Opcode, Dst, Src);
}

int PPCTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                   const Instruction *I) {
  int Cost = BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
  return vectorCostAdjustment(Cost, Opcode, ValTy, nullptr);
}

int PPCTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  int Cost = BaseT::getVectorInstrCost(Opcode, Val, Index);
  Cost = vectorCostAdjustment(Cost, Opcode, Val, nullptr);

  if (ST->hasVSX() && Val->getScalarType()->isDoubleTy()) {
    // Double-precision scalars are already located in index #0 (or #1 if LE).
    if (ISD == ISD::EXTRACT_VECTOR_ELT &&
        Index == (ST->isLittleEndian() ? 1 : 0))
      return 0;

    return Cost;

  } else if (ST->hasQPX() && Val->getScalarType()->isFloatingPointTy()) {
    // Floating point scalars are already located in index #0.
    if (Index == 0)
      return 0;

    return Cost;
  }

  // Estimated cost of a load-hit-store delay. This was obtained
  // experimentally as a minimum needed to prevent unprofitable
  // vectorization for the paq8p benchmark. It may need to be
  // raised further if other unprofitable cases remain.
  unsigned LHSPenalty = 2;
  if (ISD == ISD::INSERT_VECTOR_ELT)
    LHSPenalty += 7;

  // Vector element insert/extract with Altivec is very expensive,
  // because they require store and reload with the attendant
  // processor stall for load-hit-store. Until VSX is available,
  // these need to be estimated as very costly.
  if (ISD == ISD::EXTRACT_VECTOR_ELT ||
      ISD == ISD::INSERT_VECTOR_ELT)
    return LHSPenalty + Cost;

  return Cost;
}

int PPCTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                unsigned AddressSpace, const Instruction *I) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  int Cost = BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
  Cost = vectorCostAdjustment(Cost, Opcode, Src, nullptr);

  bool IsAltivecType = ST->hasAltivec() &&
                       (LT.second == MVT::v16i8 || LT.second == MVT::v8i16 ||
                        LT.second == MVT::v4i32 || LT.second == MVT::v4f32);
  bool IsVSXType = ST->hasVSX() &&
                   (LT.second == MVT::v2f64 || LT.second == MVT::v2i64);
  bool IsQPXType = ST->hasQPX() &&
                   (LT.second == MVT::v4f64 || LT.second == MVT::v4f32);

  // VSX has 32b/64b load instructions. Legalization can handle loading of
  // 32b/64b to VSR correctly and cheaply. But BaseT::getMemoryOpCost and
  // PPCTargetLowering can't compute the cost appropriately. So here we
  // explicitly check this case.
  unsigned MemBits = Src->getPrimitiveSizeInBits();
  if (Opcode == Instruction::Load && ST->hasVSX() && IsAltivecType &&
      (MemBits == 64 || (ST->hasP8Vector() && MemBits == 32)))
    return 1;

  // Aligned loads and stores are easy.
  unsigned SrcBytes = LT.second.getStoreSize();
  if (!SrcBytes || !Alignment || Alignment >= SrcBytes)
    return Cost;

  // If we can use the permutation-based load sequence, then this is also
  // relatively cheap (not counting loop-invariant instructions): one load plus
  // one permute (the last load in a series has extra cost, but we're
  // neglecting that here). Note that on the P7, we could do unaligned loads
  // for Altivec types using the VSX instructions, but that's more expensive
  // than using the permutation-based load sequence. On the P8, that's no
  // longer true.
  if (Opcode == Instruction::Load &&
      ((!ST->hasP8Vector() && IsAltivecType) || IsQPXType) &&
      Alignment >= LT.second.getScalarType().getStoreSize())
    return Cost + LT.first; // Add the cost of the permutations.

  // For VSX, we can do unaligned loads and stores on Altivec/VSX types. On the
  // P7, unaligned vector loads are more expensive than the permutation-based
  // load sequence, so that might be used instead, but regardless, the net cost
  // is about the same (not counting loop-invariant instructions).
  if (IsVSXType || (ST->hasVSX() && IsAltivecType))
    return Cost;

  // Newer PPC supports unaligned memory access.
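  // (Illustrative arithmetic for the decomposition path below: a 16-byte
  // vector store with 4-byte alignment adds LT.first * (16 / 4 - 1) extra
  // scalar accesses, i.e. three when the type needs no splitting, plus one
  // element extract per lane.)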
  if (TLI->allowsMisalignedMemoryAccesses(LT.second, 0))
    return Cost;

  // PPC in general does not support unaligned loads and stores. They'll need
  // to be decomposed based on the alignment factor.

  // Add the cost of each scalar load or store.
  Cost += LT.first * (SrcBytes / Alignment - 1);

  // For a vector type, there is also scalarization overhead (only for
  // stores; loads are expanded using the vector-load + permutation sequence,
  // which is much less expensive).
  if (Src->isVectorTy() && Opcode == Instruction::Store)
    for (int i = 0, e = Src->getVectorNumElements(); i < e; ++i)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Src, i);

  return Cost;
}

int PPCTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                           unsigned Factor,
                                           ArrayRef<unsigned> Indices,
                                           unsigned Alignment,
                                           unsigned AddressSpace,
                                           bool UseMaskForCond,
                                           bool UseMaskForGaps) {
  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace,
                                             UseMaskForCond, UseMaskForGaps);

  assert(isa<VectorType>(VecTy) &&
         "Expect a vector type for interleaved memory op");

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, VecTy);

  // Firstly, the cost of the load/store operation.
  int Cost = getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace);

  // PPC, for both Altivec/VSX and QPX, supports cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). For each result vector, we need one shuffle per incoming
  // vector (except that the first shuffle can take two incoming vectors
  // because it does not need to take itself).
  Cost += Factor * (LT.first - 1);

  return Cost;
}