//===-- PPCTargetTransformInfo.cpp - PPC specific TTI ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "PPCTargetTransformInfo.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;

#define DEBUG_TYPE "ppctti"

static cl::opt<bool> DisablePPCConstHoist("disable-ppc-constant-hoisting",
cl::desc("disable constant hoisting on PPC"), cl::init(false), cl::Hidden);

// This is currently only used for the data prefetch pass
static cl::opt<unsigned>
CacheLineSize("ppc-loop-prefetch-cache-line", cl::Hidden, cl::init(64),
              cl::desc("The loop prefetch cache line size"));

static cl::opt<bool>
EnablePPCColdCC("ppc-enable-coldcc", cl::Hidden, cl::init(false),
                cl::desc("Enable using coldcc calling conv for cold "
                         "internal functions"));

static cl::opt<bool>
LsrNoInsnsCost("ppc-lsr-no-insns-cost", cl::Hidden, cl::init(false),
               cl::desc("Do not add instruction count to lsr cost model"));

// The latency of mtctr is only justified if there are more than 4
// comparisons that will be removed as a result.
static cl::opt<unsigned>
SmallCTRLoopThreshold("min-ctr-loop-threshold", cl::init(4), cl::Hidden,
                      cl::desc("Loops with a constant trip count smaller than "
                               "this value will not use the count register."));

//===----------------------------------------------------------------------===//
//
// PPC cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
PPCTTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (ST->hasPOPCNTD() != PPCSubtarget::POPCNTD_Unavailable && TyWidth <= 64)
    return ST->hasPOPCNTD() == PPCSubtarget::POPCNTD_Slow ?
             TTI::PSK_SlowHardware : TTI::PSK_FastHardware;
  return TTI::PSK_Software;
}

Optional<Instruction *>
PPCTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
  Intrinsic::ID IID = II.getIntrinsicID();
  switch (IID) {
  default:
    break;
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
    // Turn PPC lvx -> load if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(
            II.getArgOperand(0), Align(16), IC.getDataLayout(), &II,
            &IC.getAssumptionCache(), &IC.getDominatorTree()) >= 16) {
      Value *Ptr = IC.Builder.CreateBitCast(
          II.getArgOperand(0), PointerType::getUnqual(II.getType()));
      return new LoadInst(II.getType(), Ptr, "", false, Align(16));
    }
    break;
  case Intrinsic::ppc_vsx_lxvw4x:
  case Intrinsic::ppc_vsx_lxvd2x: {
    // Turn PPC VSX loads into normal loads.
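    // The VSX load instructions handle unaligned addresses in hardware, so
    // the replacement load is emitted with an alignment of 1.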
    Value *Ptr = IC.Builder.CreateBitCast(II.getArgOperand(0),
                                          PointerType::getUnqual(II.getType()));
    return new LoadInst(II.getType(), Ptr, Twine(""), false, Align(1));
  }
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // Turn stvx -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(
            II.getArgOperand(1), Align(16), IC.getDataLayout(), &II,
            &IC.getAssumptionCache(), &IC.getDominatorTree()) >= 16) {
      Type *OpPtrTy = PointerType::getUnqual(II.getArgOperand(0)->getType());
      Value *Ptr = IC.Builder.CreateBitCast(II.getArgOperand(1), OpPtrTy);
      return new StoreInst(II.getArgOperand(0), Ptr, false, Align(16));
    }
    break;
  case Intrinsic::ppc_vsx_stxvw4x:
  case Intrinsic::ppc_vsx_stxvd2x: {
    // Turn PPC VSX stores into normal stores.
    Type *OpPtrTy = PointerType::getUnqual(II.getArgOperand(0)->getType());
    Value *Ptr = IC.Builder.CreateBitCast(II.getArgOperand(1), OpPtrTy);
    return new StoreInst(II.getArgOperand(0), Ptr, false, Align(1));
  }
  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
    // Note that ppc_altivec_vperm has a big-endian bias, so when creating
    // a vector shuffle for little endian, we must undo the transformation
    // performed on vec_perm in altivec.h. That is, we must complement
    // the permutation mask with respect to 31 and reverse the order of
    // V1 and V2.
    if (Constant *Mask = dyn_cast<Constant>(II.getArgOperand(2))) {
      assert(cast<FixedVectorType>(Mask->getType())->getNumElements() == 16 &&
             "Bad type for intrinsic!");

      // Check that all of the elements are integer constants or undefs.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        Constant *Elt = Mask->getAggregateElement(i);
        if (!Elt || !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
          AllEltsOk = false;
          break;
        }
      }

      if (AllEltsOk) {
        // Cast the input vectors to byte vectors.
        Value *Op0 =
            IC.Builder.CreateBitCast(II.getArgOperand(0), Mask->getType());
        Value *Op1 =
            IC.Builder.CreateBitCast(II.getArgOperand(1), Mask->getType());
        Value *Result = UndefValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

        for (unsigned i = 0; i != 16; ++i) {
          if (isa<UndefValue>(Mask->getAggregateElement(i)))
            continue;
          unsigned Idx =
              cast<ConstantInt>(Mask->getAggregateElement(i))->getZExtValue();
          Idx &= 31; // Match the hardware behavior.
          if (DL.isLittleEndian())
            Idx = 31 - Idx;

          if (!ExtractedElts[Idx]) {
            Value *Op0ToUse = (DL.isLittleEndian()) ? Op1 : Op0;
            Value *Op1ToUse = (DL.isLittleEndian()) ? Op0 : Op1;
            ExtractedElts[Idx] = IC.Builder.CreateExtractElement(
                Idx < 16 ? Op0ToUse : Op1ToUse, IC.Builder.getInt32(Idx & 15));
          }

          // Insert this value into the result vector.
          Result = IC.Builder.CreateInsertElement(Result, ExtractedElts[Idx],
                                                  IC.Builder.getInt32(i));
        }
        return CastInst::Create(Instruction::BitCast, Result, II.getType());
      }
    }
    break;
  }
  return None;
}

InstructionCost PPCTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
                                          TTI::TargetCostKind CostKind) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(Imm, Ty, CostKind);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  if (Imm == 0)
    return TTI::TCC_Free;

  if (Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Basic;

    if (isInt<32>(Imm.getSExtValue())) {
      // A constant that can be materialized using lis.
      if ((Imm.getZExtValue() & 0xFFFF) == 0)
        return TTI::TCC_Basic;

      return 2 * TTI::TCC_Basic;
    }
  }

  return 4 * TTI::TCC_Basic;
}

InstructionCost PPCTTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                                const APInt &Imm, Type *Ty,
                                                TTI::TargetCostKind CostKind) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCostIntrin(IID, Idx, Imm, Ty, CostKind);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return PPCTTIImpl::getIntImmCost(Imm, Ty, CostKind);
}

InstructionCost PPCTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                              const APInt &Imm, Type *Ty,
                                              TTI::TargetCostKind CostKind,
                                              Instruction *Inst) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCostInst(Opcode, Idx, Imm, Ty, CostKind, Inst);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  unsigned ImmIdx = ~0U;
  bool ShiftedFree = false, RunFree = false, UnsignedFree = false,
       ZeroFree = false;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
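    // Returning a non-free cost for the base operand (Idx == 0) is what
    // persuades ConstantHoisting to keep a single copy of it in a register.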
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::And:
    RunFree = true; // (for the rotate-and-mask instructions)
    LLVM_FALLTHROUGH;
  case Instruction::Add:
  case Instruction::Or:
  case Instruction::Xor:
    ShiftedFree = true;
    LLVM_FALLTHROUGH;
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    ImmIdx = 1;
    break;
  case Instruction::ICmp:
    UnsignedFree = true;
    ImmIdx = 1;
    // Zero comparisons can use record-form instructions.
    LLVM_FALLTHROUGH;
  case Instruction::Select:
    ZeroFree = true;
    break;
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Ret:
  case Instruction::Load:
  case Instruction::Store:
    break;
  }

  if (ZeroFree && Imm == 0)
    return TTI::TCC_Free;

  if (Idx == ImmIdx && Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;

    if (RunFree) {
      if (Imm.getBitWidth() <= 32 &&
          (isShiftedMask_32(Imm.getZExtValue()) ||
           isShiftedMask_32(~Imm.getZExtValue())))
        return TTI::TCC_Free;

      if (ST->isPPC64() &&
          (isShiftedMask_64(Imm.getZExtValue()) ||
           isShiftedMask_64(~Imm.getZExtValue())))
        return TTI::TCC_Free;
    }

    if (UnsignedFree && isUInt<16>(Imm.getZExtValue()))
      return TTI::TCC_Free;

    if (ShiftedFree && (Imm.getZExtValue() & 0xFFFF) == 0)
      return TTI::TCC_Free;
  }

  return PPCTTIImpl::getIntImmCost(Imm, Ty, CostKind);
}

// Check if the current Type is an MMA vector type. The two valid MMA types
// are v256i1 and v512i1.
static bool isMMAType(Type *Ty) {
  return Ty->isVectorTy() && (Ty->getScalarSizeInBits() == 1) &&
         (Ty->getPrimitiveSizeInBits() > 128);
}

InstructionCost PPCTTIImpl::getUserCost(const User *U,
                                        ArrayRef<const Value *> Operands,
                                        TTI::TargetCostKind CostKind) {
  // Set the max cost if an MMA type is present (v256i1, v512i1).
  if (isMMAType(U->getType()))
    return InstructionCost::getMax();

  // getCastInstrCost and getMemoryOpCost already perform the vector
  // adjustment internally, so defer to the base implementation for casts,
  // loads, and stores.
  if (isa<CastInst>(U) || isa<LoadInst>(U) || isa<StoreInst>(U))
    return BaseT::getUserCost(U, Operands, CostKind);

  if (U->getType()->isVectorTy()) {
    // Instructions that need to be split should cost more.
    std::pair<InstructionCost, MVT> LT =
        TLI->getTypeLegalizationCost(DL, U->getType());
    return LT.first * BaseT::getUserCost(U, Operands, CostKind);
  }

  return BaseT::getUserCost(U, Operands, CostKind);
}

// Determining the address of a TLS variable results in a function call in
// certain TLS models.
static bool memAddrUsesCTR(const Value *MemAddr, const PPCTargetMachine &TM,
                           SmallPtrSetImpl<const Value *> &Visited) {
  // No need to traverse again if we already checked this operand.
  if (!Visited.insert(MemAddr).second)
    return false;
  const auto *GV = dyn_cast<GlobalValue>(MemAddr);
  if (!GV) {
    // Recurse to check for constants that refer to TLS global variables.
    if (const auto *CV = dyn_cast<Constant>(MemAddr))
      for (const auto &CO : CV->operands())
        if (memAddrUsesCTR(CO, TM, Visited))
          return true;
    return false;
  }

  if (!GV->isThreadLocal())
    return false;
  TLSModel::Model Model = TM.getTLSModel(GV);
  return Model == TLSModel::GeneralDynamic || Model == TLSModel::LocalDynamic;
}

bool PPCTTIImpl::mightUseCTR(BasicBlock *BB, TargetLibraryInfo *LibInfo,
                             SmallPtrSetImpl<const Value *> &Visited) {
  const PPCTargetMachine &TM = ST->getTargetMachine();

  // Loop through the inline asm constraints and look for something that
  // clobbers ctr.
  auto asmClobbersCTR = [](InlineAsm *IA) {
    InlineAsm::ConstraintInfoVector CIV = IA->ParseConstraints();
    for (unsigned i = 0, ie = CIV.size(); i < ie; ++i) {
      InlineAsm::ConstraintInfo &C = CIV[i];
      if (C.Type != InlineAsm::isInput)
        for (unsigned j = 0, je = C.Codes.size(); j < je; ++j)
          if (StringRef(C.Codes[j]).equals_insensitive("{ctr}"))
            return true;
    }
    return false;
  };

  auto isLargeIntegerTy = [](bool Is32Bit, Type *Ty) {
    if (IntegerType *ITy = dyn_cast<IntegerType>(Ty))
      return ITy->getBitWidth() > (Is32Bit ? 32U : 64U);

    return false;
  };

  auto supportedHalfPrecisionOp = [](Instruction *Inst) {
    switch (Inst->getOpcode()) {
    default:
      return false;
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::Load:
    case Instruction::Store:
    case Instruction::FPToUI:
    case Instruction::UIToFP:
    case Instruction::FPToSI:
    case Instruction::SIToFP:
      return true;
    }
  };

  for (BasicBlock::iterator J = BB->begin(), JE = BB->end();
       J != JE; ++J) {
    // There are no direct operations on half precision so assume that
    // anything with that type requires a call except for a few select
    // operations with Power9.
    if (Instruction *CurrInst = dyn_cast<Instruction>(J)) {
      for (const auto &Op : CurrInst->operands()) {
        if (Op->getType()->getScalarType()->isHalfTy() ||
            CurrInst->getType()->getScalarType()->isHalfTy())
          return !(ST->isISA3_0() && supportedHalfPrecisionOp(CurrInst));
      }
    }
    if (CallInst *CI = dyn_cast<CallInst>(J)) {
      // Inline ASM is okay, unless it clobbers the ctr register.
      if (InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledOperand())) {
        if (asmClobbersCTR(IA))
          return true;
        continue;
      }

      if (Function *F = CI->getCalledFunction()) {
        // Most intrinsics don't become function calls, but some might.
        // sin, cos, exp and log are always calls.
        unsigned Opcode = 0;
        if (F->getIntrinsicID() != Intrinsic::not_intrinsic) {
          switch (F->getIntrinsicID()) {
          default: continue;
          // If we have a call to loop_decrement or set_loop_iterations,
          // we're definitely using CTR.
          case Intrinsic::set_loop_iterations:
          case Intrinsic::loop_decrement:
            return true;

          // Binary operations on 128-bit values will use CTR.
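          // (Arithmetic on fp128 and ppc_fp128 is conservatively assumed to
          // be expanded to runtime calls.)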
          case Intrinsic::experimental_constrained_fadd:
          case Intrinsic::experimental_constrained_fsub:
          case Intrinsic::experimental_constrained_fmul:
          case Intrinsic::experimental_constrained_fdiv:
          case Intrinsic::experimental_constrained_frem:
            if (F->getType()->getScalarType()->isFP128Ty() ||
                F->getType()->getScalarType()->isPPC_FP128Ty())
              return true;
            break;

          case Intrinsic::experimental_constrained_fptosi:
          case Intrinsic::experimental_constrained_fptoui:
          case Intrinsic::experimental_constrained_sitofp:
          case Intrinsic::experimental_constrained_uitofp: {
            Type *SrcType = CI->getArgOperand(0)->getType()->getScalarType();
            Type *DstType = CI->getType()->getScalarType();
            if (SrcType->isPPC_FP128Ty() || DstType->isPPC_FP128Ty() ||
                isLargeIntegerTy(!TM.isPPC64(), SrcType) ||
                isLargeIntegerTy(!TM.isPPC64(), DstType))
              return true;
            break;
          }

          // Exclude eh_sjlj_setjmp; we don't need to exclude eh_sjlj_longjmp
          // because, although it does clobber the counter register, the
          // control can't then return to inside the loop unless there is also
          // an eh_sjlj_setjmp.
          case Intrinsic::eh_sjlj_setjmp:

          case Intrinsic::memcpy:
          case Intrinsic::memmove:
          case Intrinsic::memset:
          case Intrinsic::powi:
          case Intrinsic::log:
          case Intrinsic::log2:
          case Intrinsic::log10:
          case Intrinsic::exp:
          case Intrinsic::exp2:
          case Intrinsic::pow:
          case Intrinsic::sin:
          case Intrinsic::cos:
          case Intrinsic::experimental_constrained_powi:
          case Intrinsic::experimental_constrained_log:
          case Intrinsic::experimental_constrained_log2:
          case Intrinsic::experimental_constrained_log10:
          case Intrinsic::experimental_constrained_exp:
          case Intrinsic::experimental_constrained_exp2:
          case Intrinsic::experimental_constrained_pow:
          case Intrinsic::experimental_constrained_sin:
          case Intrinsic::experimental_constrained_cos:
            return true;
          // There is no corresponding FMA instruction for PPC double double.
          // Thus, we need to disable CTR loop generation for this type.
          case Intrinsic::fmuladd:
          case Intrinsic::copysign:
            if (CI->getArgOperand(0)->getType()->getScalarType()->
                  isPPC_FP128Ty())
              return true;
            else
              continue; // ISD::FCOPYSIGN is never a library call.
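          // The intrinsics below map directly to ISD opcodes; whether they
          // become calls is decided by the legality check after the switch.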
          case Intrinsic::fma: Opcode = ISD::FMA; break;
          case Intrinsic::sqrt: Opcode = ISD::FSQRT; break;
          case Intrinsic::floor: Opcode = ISD::FFLOOR; break;
          case Intrinsic::ceil: Opcode = ISD::FCEIL; break;
          case Intrinsic::trunc: Opcode = ISD::FTRUNC; break;
          case Intrinsic::rint: Opcode = ISD::FRINT; break;
          case Intrinsic::lrint: Opcode = ISD::LRINT; break;
          case Intrinsic::llrint: Opcode = ISD::LLRINT; break;
          case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
          case Intrinsic::round: Opcode = ISD::FROUND; break;
          case Intrinsic::lround: Opcode = ISD::LROUND; break;
          case Intrinsic::llround: Opcode = ISD::LLROUND; break;
          case Intrinsic::minnum: Opcode = ISD::FMINNUM; break;
          case Intrinsic::maxnum: Opcode = ISD::FMAXNUM; break;
          case Intrinsic::experimental_constrained_fcmp:
            Opcode = ISD::STRICT_FSETCC;
            break;
          case Intrinsic::experimental_constrained_fcmps:
            Opcode = ISD::STRICT_FSETCCS;
            break;
          case Intrinsic::experimental_constrained_fma:
            Opcode = ISD::STRICT_FMA;
            break;
          case Intrinsic::experimental_constrained_sqrt:
            Opcode = ISD::STRICT_FSQRT;
            break;
          case Intrinsic::experimental_constrained_floor:
            Opcode = ISD::STRICT_FFLOOR;
            break;
          case Intrinsic::experimental_constrained_ceil:
            Opcode = ISD::STRICT_FCEIL;
            break;
          case Intrinsic::experimental_constrained_trunc:
            Opcode = ISD::STRICT_FTRUNC;
            break;
          case Intrinsic::experimental_constrained_rint:
            Opcode = ISD::STRICT_FRINT;
            break;
          case Intrinsic::experimental_constrained_lrint:
            Opcode = ISD::STRICT_LRINT;
            break;
          case Intrinsic::experimental_constrained_llrint:
            Opcode = ISD::STRICT_LLRINT;
            break;
          case Intrinsic::experimental_constrained_nearbyint:
            Opcode = ISD::STRICT_FNEARBYINT;
            break;
          case Intrinsic::experimental_constrained_round:
            Opcode = ISD::STRICT_FROUND;
            break;
          case Intrinsic::experimental_constrained_lround:
            Opcode = ISD::STRICT_LROUND;
            break;
          case Intrinsic::experimental_constrained_llround:
            Opcode = ISD::STRICT_LLROUND;
            break;
          case Intrinsic::experimental_constrained_minnum:
            Opcode = ISD::STRICT_FMINNUM;
            break;
          case Intrinsic::experimental_constrained_maxnum:
            Opcode = ISD::STRICT_FMAXNUM;
            break;
          case Intrinsic::umul_with_overflow: Opcode = ISD::UMULO; break;
          case Intrinsic::smul_with_overflow: Opcode = ISD::SMULO; break;
          }
        }

        // PowerPC does not use [US]DIVREM or other library calls for
        // operations on regular types which are not otherwise library calls
        // (i.e. soft float or atomics). If adapting for targets that do,
        // additional care is required here.

        LibFunc Func;
        if (!F->hasLocalLinkage() && F->hasName() && LibInfo &&
            LibInfo->getLibFunc(F->getName(), Func) &&
            LibInfo->hasOptimizedCodeGen(Func)) {
          // Non-read-only functions are never treated as intrinsics.
          if (!CI->onlyReadsMemory())
            return true;

          // Conversion happens only for FP calls.
          if (!CI->getArgOperand(0)->getType()->isFloatingPointTy())
            return true;

          switch (Func) {
          default: return true;
          case LibFunc_copysign:
          case LibFunc_copysignf:
            continue; // ISD::FCOPYSIGN is never a library call.
          case LibFunc_copysignl:
            return true;
          case LibFunc_fabs:
          case LibFunc_fabsf:
          case LibFunc_fabsl:
            continue; // ISD::FABS is never a library call.
          case LibFunc_sqrt:
          case LibFunc_sqrtf:
          case LibFunc_sqrtl:
            Opcode = ISD::FSQRT; break;
          case LibFunc_floor:
          case LibFunc_floorf:
          case LibFunc_floorl:
            Opcode = ISD::FFLOOR; break;
          case LibFunc_nearbyint:
          case LibFunc_nearbyintf:
          case LibFunc_nearbyintl:
            Opcode = ISD::FNEARBYINT; break;
          case LibFunc_ceil:
          case LibFunc_ceilf:
          case LibFunc_ceill:
            Opcode = ISD::FCEIL; break;
          case LibFunc_rint:
          case LibFunc_rintf:
          case LibFunc_rintl:
            Opcode = ISD::FRINT; break;
          case LibFunc_round:
          case LibFunc_roundf:
          case LibFunc_roundl:
            Opcode = ISD::FROUND; break;
          case LibFunc_trunc:
          case LibFunc_truncf:
          case LibFunc_truncl:
            Opcode = ISD::FTRUNC; break;
          case LibFunc_fmin:
          case LibFunc_fminf:
          case LibFunc_fminl:
            Opcode = ISD::FMINNUM; break;
          case LibFunc_fmax:
          case LibFunc_fmaxf:
          case LibFunc_fmaxl:
            Opcode = ISD::FMAXNUM; break;
          }
        }

        if (Opcode) {
          EVT EVTy =
              TLI->getValueType(DL, CI->getArgOperand(0)->getType(), true);

          if (EVTy == MVT::Other)
            return true;

          if (TLI->isOperationLegalOrCustom(Opcode, EVTy))
            continue;
          else if (EVTy.isVector() &&
                   TLI->isOperationLegalOrCustom(Opcode, EVTy.getScalarType()))
            continue;

          return true;
        }
      }

      return true;
    } else if (isa<BinaryOperator>(J) &&
               (J->getType()->getScalarType()->isFP128Ty() ||
                J->getType()->getScalarType()->isPPC_FP128Ty())) {
      // Most operations on f128 or ppc_fp128 values become calls.
      return true;
    } else if (isa<UIToFPInst>(J) || isa<SIToFPInst>(J) ||
               isa<FPToUIInst>(J) || isa<FPToSIInst>(J)) {
      CastInst *CI = cast<CastInst>(J);
      if (CI->getSrcTy()->getScalarType()->isPPC_FP128Ty() ||
          CI->getDestTy()->getScalarType()->isPPC_FP128Ty() ||
          isLargeIntegerTy(!TM.isPPC64(), CI->getSrcTy()->getScalarType()) ||
          isLargeIntegerTy(!TM.isPPC64(), CI->getDestTy()->getScalarType()))
        return true;
    } else if (isLargeIntegerTy(!TM.isPPC64(),
                                J->getType()->getScalarType()) &&
               (J->getOpcode() == Instruction::UDiv ||
                J->getOpcode() == Instruction::SDiv ||
                J->getOpcode() == Instruction::URem ||
                J->getOpcode() == Instruction::SRem)) {
      return true;
    } else if (!TM.isPPC64() &&
               isLargeIntegerTy(false, J->getType()->getScalarType()) &&
               (J->getOpcode() == Instruction::Shl ||
                J->getOpcode() == Instruction::AShr ||
                J->getOpcode() == Instruction::LShr)) {
      // Only on PPC32, for 128-bit integers (specifically not 64-bit
      // integers), these might be runtime calls.
      return true;
    } else if (isa<IndirectBrInst>(J) || isa<InvokeInst>(J)) {
      // On PowerPC, indirect jumps use the counter register.
      return true;
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(J)) {
      if (SI->getNumCases() + 1 >= (unsigned)TLI->getMinimumJumpTableEntries())
        return true;
    }

    // FREM is always a call.
    if (J->getOpcode() == Instruction::FRem)
      return true;

    if (ST->useSoftFloat()) {
      switch (J->getOpcode()) {
      case Instruction::FAdd:
      case Instruction::FSub:
      case Instruction::FMul:
      case Instruction::FDiv:
      case Instruction::FPTrunc:
      case Instruction::FPExt:
      case Instruction::FPToUI:
      case Instruction::FPToSI:
      case Instruction::UIToFP:
      case Instruction::SIToFP:
      case Instruction::FCmp:
        return true;
      }
    }

    for (Value *Operand : J->operands())
      if (memAddrUsesCTR(Operand, TM, Visited))
        return true;
  }

  return false;
}

bool PPCTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                          AssumptionCache &AC,
                                          TargetLibraryInfo *LibInfo,
                                          HardwareLoopInfo &HWLoopInfo) {
  const PPCTargetMachine &TM = ST->getTargetMachine();
  TargetSchedModel SchedModel;
  SchedModel.init(ST);

  // Do not convert small short loops to a CTR loop.
  unsigned ConstTripCount = SE.getSmallConstantTripCount(L);
  if (ConstTripCount && ConstTripCount < SmallCTRLoopThreshold) {
    SmallPtrSet<const Value *, 32> EphValues;
    CodeMetrics::collectEphemeralValues(L, &AC, EphValues);
    CodeMetrics Metrics;
    for (BasicBlock *BB : L->blocks())
      Metrics.analyzeBasicBlock(BB, *this, EphValues);
    // 6 is an approximate latency for the mtctr instruction.
    if (Metrics.NumInsts <= (6 * SchedModel.getIssueWidth()))
      return false;
  }

  // We don't want to spill/restore the counter register, and so we don't
  // want to use the counter register if the loop contains calls.
  SmallPtrSet<const Value *, 4> Visited;
  for (Loop::block_iterator I = L->block_begin(), IE = L->block_end();
       I != IE; ++I)
    if (mightUseCTR(*I, LibInfo, Visited))
      return false;

  SmallVector<BasicBlock*, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  // If there is an exit edge known to be frequently taken,
  // we should not transform this loop.
  for (auto &BB : ExitingBlocks) {
    Instruction *TI = BB->getTerminator();
    if (!TI) continue;

    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      uint64_t TrueWeight = 0, FalseWeight = 0;
      if (!BI->isConditional() ||
          !BI->extractProfMetadata(TrueWeight, FalseWeight))
        continue;

      // If the exit path is more frequent than the loop path,
      // we return here without further analysis for this loop.
      bool TrueIsExit = !L->contains(BI->getSuccessor(0));
      if ((TrueIsExit && FalseWeight < TrueWeight) ||
          (!TrueIsExit && FalseWeight > TrueWeight))
        return false;
    }
  }

  // If an exit block has a PHI that accesses a TLS variable as one of the
  // incoming values from the loop, we cannot produce a CTR loop because the
  // address for that value will be computed in the loop.
  SmallVector<BasicBlock *, 4> ExitBlocks;
  L->getExitBlocks(ExitBlocks);
  for (auto &BB : ExitBlocks) {
    for (auto &PHI : BB->phis()) {
      for (int Idx = 0, EndIdx = PHI.getNumIncomingValues(); Idx < EndIdx;
           Idx++) {
        const BasicBlock *IncomingBB = PHI.getIncomingBlock(Idx);
        const Value *IncomingValue = PHI.getIncomingValue(Idx);
        if (L->contains(IncomingBB) &&
            memAddrUsesCTR(IncomingValue, TM, Visited))
          return false;
      }
    }
  }

  LLVMContext &C = L->getHeader()->getContext();
  HWLoopInfo.CountType = TM.isPPC64() ?
                             Type::getInt64Ty(C) : Type::getInt32Ty(C);
  HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1);
  return true;
}

void PPCTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP,
                                         OptimizationRemarkEmitter *ORE) {
  if (ST->getCPUDirective() == PPC::DIR_A2) {
    // The A2 is in-order with a deep pipeline, and concatenation unrolling
    // helps expose latency-hiding opportunities to the instruction scheduler.
    UP.Partial = UP.Runtime = true;

    // We unroll a lot on the A2 (hundreds of instructions), and the benefits
    // often outweigh the cost of a division to compute the trip count.
    UP.AllowExpensiveTripCount = true;
  }

  BaseT::getUnrollingPreferences(L, SE, UP, ORE);
}

void PPCTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                       TTI::PeelingPreferences &PP) {
  BaseT::getPeelingPreferences(L, SE, PP);
}

// This function returns true to allow using the coldcc calling convention.
// Returning true results in coldcc being used for functions which are cold at
// all call sites when their callers do not call any other non-coldcc
// functions.
bool PPCTTIImpl::useColdCCForColdCall(Function &F) {
  return EnablePPCColdCC;
}

bool PPCTTIImpl::enableAggressiveInterleaving(bool LoopHasReductions) {
  // On the A2, always unroll aggressively.
  if (ST->getCPUDirective() == PPC::DIR_A2)
    return true;

  return LoopHasReductions;
}

PPCTTIImpl::TTI::MemCmpExpansionOptions
PPCTTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  TTI::MemCmpExpansionOptions Options;
  Options.LoadSizes = {8, 4, 2, 1};
  Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
  return Options;
}

bool PPCTTIImpl::enableInterleavedAccessVectorization() {
  return true;
}

unsigned PPCTTIImpl::getNumberOfRegisters(unsigned ClassID) const {
  assert(ClassID == GPRRC || ClassID == FPRRC ||
         ClassID == VRRC || ClassID == VSXRC);
  if (ST->hasVSX()) {
    assert(ClassID == GPRRC || ClassID == VSXRC || ClassID == VRRC);
    return ClassID == VSXRC ? 64 : 32;
  }
  assert(ClassID == GPRRC || ClassID == FPRRC || ClassID == VRRC);
  return 32;
}

unsigned PPCTTIImpl::getRegisterClassForType(bool Vector, Type *Ty) const {
  if (Vector)
    return ST->hasVSX() ? VSXRC : VRRC;
  else if (Ty && (Ty->getScalarType()->isFloatTy() ||
                  Ty->getScalarType()->isDoubleTy()))
    return ST->hasVSX() ? VSXRC : FPRRC;
  else if (Ty && (Ty->getScalarType()->isFP128Ty() ||
                  Ty->getScalarType()->isPPC_FP128Ty()))
    return VRRC;
  else if (Ty && Ty->getScalarType()->isHalfTy())
    return VSXRC;
  else
    return GPRRC;
}

const char* PPCTTIImpl::getRegisterClassName(unsigned ClassID) const {

  switch (ClassID) {
  default:
    llvm_unreachable("unknown register class");
    return "PPC::unknown register class";
  case GPRRC: return "PPC::GPRRC";
  case FPRRC: return "PPC::FPRRC";
  case VRRC: return "PPC::VRRC";
  case VSXRC: return "PPC::VSXRC";
  }
}

TypeSize
PPCTTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
  switch (K) {
  case TargetTransformInfo::RGK_Scalar:
    return TypeSize::getFixed(ST->isPPC64() ? 64 : 32);
  case TargetTransformInfo::RGK_FixedWidthVector:
    return TypeSize::getFixed(ST->hasAltivec() ?
                                  128 : 0);
  case TargetTransformInfo::RGK_ScalableVector:
    return TypeSize::getScalable(0);
  }

  llvm_unreachable("Unsupported register kind");
}

unsigned PPCTTIImpl::getCacheLineSize() const {
  // Check first if the user specified a custom line size.
  if (CacheLineSize.getNumOccurrences() > 0)
    return CacheLineSize;

  // Starting with P7 we have a cache line size of 128.
  unsigned Directive = ST->getCPUDirective();
  // Assume that Future CPU has the same cache line size as the others.
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
      Directive == PPC::DIR_PWR9 || Directive == PPC::DIR_PWR10 ||
      Directive == PPC::DIR_PWR_FUTURE)
    return 128;

  // On other processors return a default of 64 bytes.
  return 64;
}

unsigned PPCTTIImpl::getPrefetchDistance() const {
  return 300;
}

unsigned PPCTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  unsigned Directive = ST->getCPUDirective();
  // The 440 has no SIMD support, but floating-point instructions
  // have a 5-cycle latency, so unroll by 5x for latency hiding.
  if (Directive == PPC::DIR_440)
    return 5;

  // The A2 has no SIMD support, but floating-point instructions
  // have a 6-cycle latency, so unroll by 6x for latency hiding.
  if (Directive == PPC::DIR_A2)
    return 6;

  // FIXME: For lack of any better information, do no harm...
  if (Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500)
    return 1;

  // For P7 and P8, floating-point instructions have a 6-cycle latency and
  // there are two execution units, so unroll by 12x for latency hiding.
  // FIXME: the same for P9 as previous gen until POWER9 scheduling is ready
  // FIXME: the same for P10 as previous gen until POWER10 scheduling is ready
  // Assume that future is the same as the others.
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
      Directive == PPC::DIR_PWR9 || Directive == PPC::DIR_PWR10 ||
      Directive == PPC::DIR_PWR_FUTURE)
    return 12;

  // For most things, modern systems have two execution units (and
  // out-of-order execution).
  return 2;
}

// Returns a cost adjustment factor to adjust the cost of vector instructions
// on targets where there is overlap between the vector and scalar units,
// thereby reducing the overall throughput of vector code with respect to
// scalar code. An invalid instruction cost is returned if the type is an MMA
// vector type.
InstructionCost PPCTTIImpl::vectorCostAdjustmentFactor(unsigned Opcode,
                                                       Type *Ty1, Type *Ty2) {
  // If the vector type is of an MMA type (v256i1, v512i1), an invalid
  // instruction cost is returned. This is to signify to other cost computing
  // functions to return the maximum instruction cost in order to prevent any
  // opportunities for the optimizer to produce MMA types within the IR.
  if (isMMAType(Ty1))
    return InstructionCost::getInvalid();

  if (!ST->vectorsUseTwoUnits() || !Ty1->isVectorTy())
    return InstructionCost(1);

  std::pair<InstructionCost, MVT> LT1 = TLI->getTypeLegalizationCost(DL, Ty1);
  // If type legalization involves splitting the vector, we don't want to
  // double the cost at every step - only the last step.
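  // (A split type has LT1.first > 1 and therefore keeps a factor of 1 here.)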
  if (LT1.first != 1 || !LT1.second.isVector())
    return InstructionCost(1);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  if (TLI->isOperationExpand(ISD, LT1.second))
    return InstructionCost(1);

  if (Ty2) {
    std::pair<InstructionCost, MVT> LT2 = TLI->getTypeLegalizationCost(DL, Ty2);
    if (LT2.first != 1 || !LT2.second.isVector())
      return InstructionCost(1);
  }

  return InstructionCost(2);
}

InstructionCost PPCTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
    TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
    const Instruction *CxtI) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  InstructionCost CostFactor = vectorCostAdjustmentFactor(Opcode, Ty, nullptr);
  if (!CostFactor.isValid())
    return InstructionCost::getMax();

  // TODO: Handle more cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                         Op2Info, Opd1PropInfo,
                                         Opd2PropInfo, Args, CxtI);

  // Fall back to the default implementation.
  InstructionCost Cost = BaseT::getArithmeticInstrCost(
      Opcode, Ty, CostKind, Op1Info, Op2Info, Opd1PropInfo, Opd2PropInfo);
  return Cost * CostFactor;
}

InstructionCost PPCTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp,
                                           ArrayRef<int> Mask, int Index,
                                           Type *SubTp) {

  InstructionCost CostFactor =
      vectorCostAdjustmentFactor(Instruction::ShuffleVector, Tp, nullptr);
  if (!CostFactor.isValid())
    return InstructionCost::getMax();

  // Legalize the type.
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

  // PPC, for both Altivec/VSX, supports cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). We need one such shuffle instruction for each actual
  // register (this is not true for arbitrary shuffles, but is true for the
  // structured types of shuffles covered by TTI::ShuffleKind).
  return LT.first * CostFactor;
}

InstructionCost PPCTTIImpl::getCFInstrCost(unsigned Opcode,
                                           TTI::TargetCostKind CostKind,
                                           const Instruction *I) {
  if (CostKind != TTI::TCK_RecipThroughput)
    return Opcode == Instruction::PHI ? 0 : 1;
  // Branches are assumed to be predicted.
  return 0;
}

InstructionCost PPCTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
                                             Type *Src,
                                             TTI::CastContextHint CCH,
                                             TTI::TargetCostKind CostKind,
                                             const Instruction *I) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  InstructionCost CostFactor = vectorCostAdjustmentFactor(Opcode, Dst, Src);
  if (!CostFactor.isValid())
    return InstructionCost::getMax();

  InstructionCost Cost =
      BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
  Cost *= CostFactor;
  // TODO: Allow non-throughput costs that aren't binary.
  if (CostKind != TTI::TCK_RecipThroughput)
    return Cost == 0 ?
                         0 : 1;
  return Cost;
}

InstructionCost PPCTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                               Type *CondTy,
                                               CmpInst::Predicate VecPred,
                                               TTI::TargetCostKind CostKind,
                                               const Instruction *I) {
  InstructionCost CostFactor =
      vectorCostAdjustmentFactor(Opcode, ValTy, nullptr);
  if (!CostFactor.isValid())
    return InstructionCost::getMax();

  InstructionCost Cost =
      BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return Cost;
  return Cost * CostFactor;
}

InstructionCost PPCTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                               unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  InstructionCost CostFactor =
      vectorCostAdjustmentFactor(Opcode, Val, nullptr);
  if (!CostFactor.isValid())
    return InstructionCost::getMax();

  InstructionCost Cost = BaseT::getVectorInstrCost(Opcode, Val, Index);
  Cost *= CostFactor;

  if (ST->hasVSX() && Val->getScalarType()->isDoubleTy()) {
    // Double-precision scalars are already located in index #0 (or #1 if LE).
    if (ISD == ISD::EXTRACT_VECTOR_ELT &&
        Index == (ST->isLittleEndian() ? 1 : 0))
      return 0;

    return Cost;

  } else if (Val->getScalarType()->isIntegerTy() && Index != -1U) {
    if (ST->hasP9Altivec()) {
      if (ISD == ISD::INSERT_VECTOR_ELT)
        // A move-to VSR and a permute/insert. Assume vector operation cost
        // for both (cost will be 2x on P9).
        return 2 * CostFactor;

      // It's an extract. Maybe we can do a cheap move-from VSR.
      unsigned EltSize = Val->getScalarSizeInBits();
      if (EltSize == 64) {
        unsigned MfvsrdIndex = ST->isLittleEndian() ? 1 : 0;
        if (Index == MfvsrdIndex)
          return 1;
      } else if (EltSize == 32) {
        unsigned MfvsrwzIndex = ST->isLittleEndian() ? 2 : 1;
        if (Index == MfvsrwzIndex)
          return 1;
      }

      // We need a vector extract (or mfvsrld). Assume vector operation cost.
      // The cost of the load constant for a vector extract is disregarded
      // (invariant, easily schedulable).
      return CostFactor;

    } else if (ST->hasDirectMove())
      // Assume permute has standard cost.
      // Assume move-to/move-from VSR have 2x standard cost.
      return 3;
  }

  // Estimated cost of a load-hit-store delay. This was obtained
  // experimentally as a minimum needed to prevent unprofitable
  // vectorization for the paq8p benchmark. It may need to be
  // raised further if other unprofitable cases remain.
  unsigned LHSPenalty = 2;
  if (ISD == ISD::INSERT_VECTOR_ELT)
    LHSPenalty += 7;

  // Vector element insert/extract with Altivec is very expensive,
  // because they require store and reload with the attendant
  // processor stall for load-hit-store. Until VSX is available,
  // these need to be estimated as very costly.
  if (ISD == ISD::EXTRACT_VECTOR_ELT ||
      ISD == ISD::INSERT_VECTOR_ELT)
    return LHSPenalty + Cost;

  return Cost;
}

InstructionCost PPCTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                            MaybeAlign Alignment,
                                            unsigned AddressSpace,
                                            TTI::TargetCostKind CostKind,
                                            const Instruction *I) {

  InstructionCost CostFactor =
      vectorCostAdjustmentFactor(Opcode, Src, nullptr);
  if (!CostFactor.isValid())
    return InstructionCost::getMax();

  if (TLI->getValueType(DL, Src, true) == MVT::Other)
    return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                  CostKind);
  // Legalize the type.
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  InstructionCost Cost =
      BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, CostKind);
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return Cost;

  Cost *= CostFactor;

  bool IsAltivecType = ST->hasAltivec() &&
                       (LT.second == MVT::v16i8 || LT.second == MVT::v8i16 ||
                        LT.second == MVT::v4i32 || LT.second == MVT::v4f32);
  bool IsVSXType = ST->hasVSX() &&
                   (LT.second == MVT::v2f64 || LT.second == MVT::v2i64);

  // VSX has 32b/64b load instructions. Legalization can handle loading of
  // 32b/64b to VSR correctly and cheaply. But BaseT::getMemoryOpCost and
  // PPCTargetLowering can't compute the cost appropriately. So here we
  // explicitly check this case.
  unsigned MemBytes = Src->getPrimitiveSizeInBits();
  if (Opcode == Instruction::Load && ST->hasVSX() && IsAltivecType &&
      (MemBytes == 64 || (ST->hasP8Vector() && MemBytes == 32)))
    return 1;

  // Aligned loads and stores are easy.
  unsigned SrcBytes = LT.second.getStoreSize();
  if (!SrcBytes || !Alignment || *Alignment >= SrcBytes)
    return Cost;

  // If we can use the permutation-based load sequence, then this is also
  // relatively cheap (not counting loop-invariant instructions): one load plus
  // one permute (the last load in a series has extra cost, but we're
  // neglecting that here). Note that on the P7, we could do unaligned loads
  // for Altivec types using the VSX instructions, but that's more expensive
  // than using the permutation-based load sequence. On the P8, that's no
  // longer true.
  if (Opcode == Instruction::Load && (!ST->hasP8Vector() && IsAltivecType) &&
      *Alignment >= LT.second.getScalarType().getStoreSize())
    return Cost + LT.first; // Add the cost of the permutations.

  // For VSX, we can do unaligned loads and stores on Altivec/VSX types. On the
  // P7, unaligned vector loads are more expensive than the permutation-based
  // load sequence, so that might be used instead, but regardless, the net cost
  // is about the same (not counting loop-invariant instructions).
  if (IsVSXType || (ST->hasVSX() && IsAltivecType))
    return Cost;

  // Newer PPC supports unaligned memory access.
  if (TLI->allowsMisalignedMemoryAccesses(LT.second, 0))
    return Cost;

  // PPC in general does not support unaligned loads and stores. They'll need
  // to be decomposed based on the alignment factor.

  // Add the cost of each scalar load or store.
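  // An access of SrcBytes at alignment A decomposes into SrcBytes / A pieces;
  // the first piece is already included in Cost, hence the "- 1" below.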
  assert(Alignment);
  Cost += LT.first * ((SrcBytes / Alignment->value()) - 1);

  // For a vector type, there is also scalarization overhead (only for
  // stores, loads are expanded using the vector-load + permutation sequence,
  // which is much less expensive).
  if (Src->isVectorTy() && Opcode == Instruction::Store)
    for (int i = 0, e = cast<FixedVectorType>(Src)->getNumElements(); i < e;
         ++i)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Src, i);

  return Cost;
}

InstructionCost PPCTTIImpl::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) {
  InstructionCost CostFactor =
      vectorCostAdjustmentFactor(Opcode, VecTy, nullptr);
  if (!CostFactor.isValid())
    return InstructionCost::getMax();

  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind,
                                             UseMaskForCond, UseMaskForGaps);

  assert(isa<VectorType>(VecTy) &&
         "Expect a vector type for interleaved memory op");

  // Legalize the type.
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, VecTy);

  // First, the cost of the load/store operation.
  InstructionCost Cost = getMemoryOpCost(Opcode, VecTy, MaybeAlign(Alignment),
                                         AddressSpace, CostKind);

  // PPC, for both Altivec/VSX, supports cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). For each result vector, we need one shuffle per incoming
  // vector (except that the first shuffle can take two incoming vectors
  // because it does not need to take itself).
  Cost += Factor * (LT.first - 1);

  return Cost;
}

InstructionCost
PPCTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                  TTI::TargetCostKind CostKind) {
  return BaseT::getIntrinsicInstrCost(ICA, CostKind);
}

bool PPCTTIImpl::areFunctionArgsABICompatible(
    const Function *Caller, const Function *Callee,
    SmallPtrSetImpl<Argument *> &Args) const {

  // We need to ensure that argument promotion does not
  // attempt to promote pointers to MMA types (__vector_pair
  // and __vector_quad) since these types explicitly cannot be
  // passed as arguments. Both of these types are larger than
  // the 128-bit Altivec vectors and have a scalar size of 1 bit.
  if (!BaseT::areFunctionArgsABICompatible(Caller, Callee, Args))
    return false;

  return llvm::none_of(Args, [](Argument *A) {
    auto *EltTy = cast<PointerType>(A->getType())->getElementType();
    if (EltTy->isSized())
      return (EltTy->isIntOrIntVectorTy(1) &&
              EltTy->getPrimitiveSizeInBits() > 128);
    return false;
  });
}

bool PPCTTIImpl::canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE,
                            LoopInfo *LI, DominatorTree *DT,
                            AssumptionCache *AC, TargetLibraryInfo *LibInfo) {
  // Process nested loops first.
  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
    if (canSaveCmp(*I, BI, SE, LI, DT, AC, LibInfo))
      return false; // Stop search.

  HardwareLoopInfo HWLoopInfo(L);

  if (!HWLoopInfo.canAnalyze(*LI))
    return false;

  if (!isHardwareLoopProfitable(L, *SE, *AC, LibInfo, HWLoopInfo))
    return false;

  if (!HWLoopInfo.isHardwareLoopCandidate(*SE, *LI, *DT))
    return false;

  *BI = HWLoopInfo.ExitBranch;
  return true;
}

bool PPCTTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                               TargetTransformInfo::LSRCost &C2) {
  // PowerPC's default behaviour here is "instruction number 1st priority".
  // If LsrNoInsnsCost is set, call the default implementation instead.
  if (!LsrNoInsnsCost)
    return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost, C1.NumIVMuls,
                    C1.NumBaseAdds, C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
           std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost, C2.NumIVMuls,
                    C2.NumBaseAdds, C2.ScaleCost, C2.ImmCost, C2.SetupCost);
  else
    return TargetTransformInfoImplBase::isLSRCostLess(C1, C2);
}

bool PPCTTIImpl::isNumRegsMajorCostOfLSR() {
  return false;
}

bool PPCTTIImpl::shouldBuildRelLookupTables() const {
  const PPCTargetMachine &TM = ST->getTargetMachine();
  // XCOFF hasn't implemented lowerRelativeReference, disable non-ELF for now.
  if (!TM.isELFv2ABI())
    return false;
  return BaseT::shouldBuildRelLookupTables();
}

bool PPCTTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                    MemIntrinsicInfo &Info) {
  switch (Inst->getIntrinsicID()) {
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::ppc_altivec_lvebx:
  case Intrinsic::ppc_altivec_lvehx:
  case Intrinsic::ppc_altivec_lvewx:
  case Intrinsic::ppc_vsx_lxvd2x:
  case Intrinsic::ppc_vsx_lxvw4x:
  case Intrinsic::ppc_vsx_lxvd2x_be:
  case Intrinsic::ppc_vsx_lxvw4x_be:
  case Intrinsic::ppc_vsx_lxvl:
  case Intrinsic::ppc_vsx_lxvll:
  case Intrinsic::ppc_vsx_lxvp: {
    Info.PtrVal = Inst->getArgOperand(0);
    Info.ReadMem = true;
    Info.WriteMem = false;
    return true;
  }
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
  case Intrinsic::ppc_altivec_stvebx:
  case Intrinsic::ppc_altivec_stvehx:
  case Intrinsic::ppc_altivec_stvewx:
  case Intrinsic::ppc_vsx_stxvd2x:
  case Intrinsic::ppc_vsx_stxvw4x:
  case Intrinsic::ppc_vsx_stxvd2x_be:
  case Intrinsic::ppc_vsx_stxvw4x_be:
  case Intrinsic::ppc_vsx_stxvl:
  case Intrinsic::ppc_vsx_stxvll:
  case Intrinsic::ppc_vsx_stxvp: {
    Info.PtrVal = Inst->getArgOperand(1);
    Info.ReadMem = false;
    Info.WriteMem = true;
    return true;
  }
  default:
    break;
  }

  return false;
}