1 //===-- PPCTargetTransformInfo.cpp - PPC specific TTI ---------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "PPCTargetTransformInfo.h"
10 #include "llvm/Analysis/CodeMetrics.h"
11 #include "llvm/Analysis/TargetLibraryInfo.h"
12 #include "llvm/Analysis/TargetTransformInfo.h"
13 #include "llvm/CodeGen/BasicTTIImpl.h"
14 #include "llvm/CodeGen/CostTable.h"
15 #include "llvm/CodeGen/TargetLowering.h"
16 #include "llvm/CodeGen/TargetSchedule.h"
17 #include "llvm/IR/IntrinsicsPowerPC.h"
18 #include "llvm/Support/CommandLine.h"
19 #include "llvm/Support/Debug.h"
20 #include "llvm/Transforms/InstCombine/InstCombiner.h"
21 #include "llvm/Transforms/Utils/Local.h"
22 
23 using namespace llvm;
24 
25 #define DEBUG_TYPE "ppctti"
26 
static cl::opt<bool> DisablePPCConstHoist("disable-ppc-constant-hoisting",
    cl::desc("disable constant hoisting on PPC"), cl::init(false), cl::Hidden);
29 
// This is currently only used for the data prefetch pass.
31 static cl::opt<unsigned>
32 CacheLineSize("ppc-loop-prefetch-cache-line", cl::Hidden, cl::init(64),
33               cl::desc("The loop prefetch cache line size"));
34 
35 static cl::opt<bool>
36 EnablePPCColdCC("ppc-enable-coldcc", cl::Hidden, cl::init(false),
37                 cl::desc("Enable using coldcc calling conv for cold "
38                          "internal functions"));
39 
40 static cl::opt<bool>
41 LsrNoInsnsCost("ppc-lsr-no-insns-cost", cl::Hidden, cl::init(false),
42                cl::desc("Do not add instruction count to lsr cost model"));
43 
44 // The latency of mtctr is only justified if there are more than 4
45 // comparisons that will be removed as a result.
46 static cl::opt<unsigned>
47 SmallCTRLoopThreshold("min-ctr-loop-threshold", cl::init(4), cl::Hidden,
48                       cl::desc("Loops with a constant trip count smaller than "
49                                "this value will not use the count register."));
50 
51 //===----------------------------------------------------------------------===//
52 //
53 // PPC cost model.
54 //
55 //===----------------------------------------------------------------------===//
56 
57 TargetTransformInfo::PopcntSupportKind
58 PPCTTIImpl::getPopcntSupport(unsigned TyWidth) {
59   assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
60   if (ST->hasPOPCNTD() != PPCSubtarget::POPCNTD_Unavailable && TyWidth <= 64)
61     return ST->hasPOPCNTD() == PPCSubtarget::POPCNTD_Slow ?
62              TTI::PSK_SlowHardware : TTI::PSK_FastHardware;
63   return TTI::PSK_Software;
64 }
65 
66 Optional<Instruction *>
67 PPCTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
68   Intrinsic::ID IID = II.getIntrinsicID();
69   switch (IID) {
70   default:
71     break;
72   case Intrinsic::ppc_altivec_lvx:
73   case Intrinsic::ppc_altivec_lvxl:
74     // Turn PPC lvx -> load if the pointer is known aligned.
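    // For example (assuming %p is known to be 16-byte aligned):
    //   %v = call <4 x i32> @llvm.ppc.altivec.lvx(i8* %p)
    // becomes a plain, naturally-aligned load through a bitcast of %p.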
75     if (getOrEnforceKnownAlignment(
76             II.getArgOperand(0), Align(16), IC.getDataLayout(), &II,
77             &IC.getAssumptionCache(), &IC.getDominatorTree()) >= 16) {
78       Value *Ptr = IC.Builder.CreateBitCast(
79           II.getArgOperand(0), PointerType::getUnqual(II.getType()));
80       return new LoadInst(II.getType(), Ptr, "", false, Align(16));
81     }
82     break;
83   case Intrinsic::ppc_vsx_lxvw4x:
84   case Intrinsic::ppc_vsx_lxvd2x: {
85     // Turn PPC VSX loads into normal loads.
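    // The VSX load intrinsics tolerate unaligned pointers, so the resulting
    // load is given Align(1) to preserve that behavior.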
86     Value *Ptr = IC.Builder.CreateBitCast(II.getArgOperand(0),
87                                           PointerType::getUnqual(II.getType()));
    return new LoadInst(II.getType(), Ptr, "", false, Align(1));
89   }
90   case Intrinsic::ppc_altivec_stvx:
91   case Intrinsic::ppc_altivec_stvxl:
92     // Turn stvx -> store if the pointer is known aligned.
93     if (getOrEnforceKnownAlignment(
94             II.getArgOperand(1), Align(16), IC.getDataLayout(), &II,
95             &IC.getAssumptionCache(), &IC.getDominatorTree()) >= 16) {
96       Type *OpPtrTy = PointerType::getUnqual(II.getArgOperand(0)->getType());
97       Value *Ptr = IC.Builder.CreateBitCast(II.getArgOperand(1), OpPtrTy);
98       return new StoreInst(II.getArgOperand(0), Ptr, false, Align(16));
99     }
100     break;
101   case Intrinsic::ppc_vsx_stxvw4x:
102   case Intrinsic::ppc_vsx_stxvd2x: {
103     // Turn PPC VSX stores into normal stores.
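    // As with the VSX loads above, use Align(1) since the intrinsics do not
    // require an aligned pointer.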
104     Type *OpPtrTy = PointerType::getUnqual(II.getArgOperand(0)->getType());
105     Value *Ptr = IC.Builder.CreateBitCast(II.getArgOperand(1), OpPtrTy);
106     return new StoreInst(II.getArgOperand(0), Ptr, false, Align(1));
107   }
108   case Intrinsic::ppc_altivec_vperm:
109     // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
110     // Note that ppc_altivec_vperm has a big-endian bias, so when creating
    // a shufflevector for little endian, we must undo the transformation
112     // performed on vec_perm in altivec.h.  That is, we must complement
113     // the permutation mask with respect to 31 and reverse the order of
114     // V1 and V2.
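    // For example, on little endian a mask element of 0 is complemented to
    // 31 - 0 = 31, selecting the last byte of the swapped operand pair.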
115     if (Constant *Mask = dyn_cast<Constant>(II.getArgOperand(2))) {
116       assert(cast<FixedVectorType>(Mask->getType())->getNumElements() == 16 &&
117              "Bad type for intrinsic!");
118 
119       // Check that all of the elements are integer constants or undefs.
120       bool AllEltsOk = true;
121       for (unsigned i = 0; i != 16; ++i) {
122         Constant *Elt = Mask->getAggregateElement(i);
123         if (!Elt || !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
124           AllEltsOk = false;
125           break;
126         }
127       }
128 
129       if (AllEltsOk) {
130         // Cast the input vectors to byte vectors.
131         Value *Op0 =
132             IC.Builder.CreateBitCast(II.getArgOperand(0), Mask->getType());
133         Value *Op1 =
134             IC.Builder.CreateBitCast(II.getArgOperand(1), Mask->getType());
135         Value *Result = UndefValue::get(Op0->getType());
136 
137         // Only extract each element once.
138         Value *ExtractedElts[32];
139         memset(ExtractedElts, 0, sizeof(ExtractedElts));
140 
141         for (unsigned i = 0; i != 16; ++i) {
142           if (isa<UndefValue>(Mask->getAggregateElement(i)))
143             continue;
144           unsigned Idx =
145               cast<ConstantInt>(Mask->getAggregateElement(i))->getZExtValue();
146           Idx &= 31; // Match the hardware behavior.
147           if (DL.isLittleEndian())
148             Idx = 31 - Idx;
149 
150           if (!ExtractedElts[Idx]) {
151             Value *Op0ToUse = (DL.isLittleEndian()) ? Op1 : Op0;
152             Value *Op1ToUse = (DL.isLittleEndian()) ? Op0 : Op1;
153             ExtractedElts[Idx] = IC.Builder.CreateExtractElement(
154                 Idx < 16 ? Op0ToUse : Op1ToUse, IC.Builder.getInt32(Idx & 15));
155           }
156 
157           // Insert this value into the result vector.
158           Result = IC.Builder.CreateInsertElement(Result, ExtractedElts[Idx],
159                                                   IC.Builder.getInt32(i));
160         }
161         return CastInst::Create(Instruction::BitCast, Result, II.getType());
162       }
163     }
164     break;
165   }
166   return None;
167 }
168 
169 int PPCTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
170                               TTI::TargetCostKind CostKind) {
171   if (DisablePPCConstHoist)
172     return BaseT::getIntImmCost(Imm, Ty, CostKind);
173 
174   assert(Ty->isIntegerTy());
175 
176   unsigned BitSize = Ty->getPrimitiveSizeInBits();
177   if (BitSize == 0)
178     return ~0U;
179 
180   if (Imm == 0)
181     return TTI::TCC_Free;
182 
183   if (Imm.getBitWidth() <= 64) {
184     if (isInt<16>(Imm.getSExtValue()))
185       return TTI::TCC_Basic;
186 
187     if (isInt<32>(Imm.getSExtValue())) {
188       // A constant that can be materialized using lis.
189       if ((Imm.getZExtValue() & 0xFFFF) == 0)
190         return TTI::TCC_Basic;
191 
192       return 2 * TTI::TCC_Basic;
193     }
194   }
195 
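  // Anything else needs a multi-instruction materialization sequence
  // (e.g. lis/ori/rldicr/oris/ori for a full 64-bit constant).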
196   return 4 * TTI::TCC_Basic;
197 }
198 
199 int PPCTTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
200                                     const APInt &Imm, Type *Ty,
201                                     TTI::TargetCostKind CostKind) {
202   if (DisablePPCConstHoist)
203     return BaseT::getIntImmCostIntrin(IID, Idx, Imm, Ty, CostKind);
204 
205   assert(Ty->isIntegerTy());
206 
207   unsigned BitSize = Ty->getPrimitiveSizeInBits();
208   if (BitSize == 0)
209     return ~0U;
210 
211   switch (IID) {
212   default:
213     return TTI::TCC_Free;
214   case Intrinsic::sadd_with_overflow:
215   case Intrinsic::uadd_with_overflow:
216   case Intrinsic::ssub_with_overflow:
217   case Intrinsic::usub_with_overflow:
218     if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<16>(Imm.getSExtValue()))
219       return TTI::TCC_Free;
220     break;
221   case Intrinsic::experimental_stackmap:
222     if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
223       return TTI::TCC_Free;
224     break;
225   case Intrinsic::experimental_patchpoint_void:
226   case Intrinsic::experimental_patchpoint_i64:
227     if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
228       return TTI::TCC_Free;
229     break;
230   }
231   return PPCTTIImpl::getIntImmCost(Imm, Ty, CostKind);
232 }
233 
234 int PPCTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
235                                   const APInt &Imm, Type *Ty,
236                                   TTI::TargetCostKind CostKind) {
237   if (DisablePPCConstHoist)
238     return BaseT::getIntImmCostInst(Opcode, Idx, Imm, Ty, CostKind);
239 
240   assert(Ty->isIntegerTy());
241 
242   unsigned BitSize = Ty->getPrimitiveSizeInBits();
243   if (BitSize == 0)
244     return ~0U;
245 
246   unsigned ImmIdx = ~0U;
247   bool ShiftedFree = false, RunFree = false, UnsignedFree = false,
248        ZeroFree = false;
249   switch (Opcode) {
250   default:
251     return TTI::TCC_Free;
252   case Instruction::GetElementPtr:
253     // Always hoist the base address of a GetElementPtr. This prevents the
254     // creation of new constants for every base constant that gets constant
255     // folded with the offset.
256     if (Idx == 0)
257       return 2 * TTI::TCC_Basic;
258     return TTI::TCC_Free;
259   case Instruction::And:
260     RunFree = true; // (for the rotate-and-mask instructions)
261     LLVM_FALLTHROUGH;
262   case Instruction::Add:
263   case Instruction::Or:
264   case Instruction::Xor:
265     ShiftedFree = true;
266     LLVM_FALLTHROUGH;
267   case Instruction::Sub:
268   case Instruction::Mul:
269   case Instruction::Shl:
270   case Instruction::LShr:
271   case Instruction::AShr:
272     ImmIdx = 1;
273     break;
274   case Instruction::ICmp:
275     UnsignedFree = true;
276     ImmIdx = 1;
277     // Zero comparisons can use record-form instructions.
278     LLVM_FALLTHROUGH;
279   case Instruction::Select:
280     ZeroFree = true;
281     break;
282   case Instruction::PHI:
283   case Instruction::Call:
284   case Instruction::Ret:
285   case Instruction::Load:
286   case Instruction::Store:
287     break;
288   }
289 
290   if (ZeroFree && Imm == 0)
291     return TTI::TCC_Free;
292 
293   if (Idx == ImmIdx && Imm.getBitWidth() <= 64) {
294     if (isInt<16>(Imm.getSExtValue()))
295       return TTI::TCC_Free;
296 
297     if (RunFree) {
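      // e.g. 0x00ffff00 is a shifted mask and 0xff0000ff is the complement
      // of one; both can be encoded by a single rotate-and-mask instruction.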
298       if (Imm.getBitWidth() <= 32 &&
299           (isShiftedMask_32(Imm.getZExtValue()) ||
300            isShiftedMask_32(~Imm.getZExtValue())))
301         return TTI::TCC_Free;
302 
303       if (ST->isPPC64() &&
304           (isShiftedMask_64(Imm.getZExtValue()) ||
305            isShiftedMask_64(~Imm.getZExtValue())))
306         return TTI::TCC_Free;
307     }
308 
309     if (UnsignedFree && isUInt<16>(Imm.getZExtValue()))
310       return TTI::TCC_Free;
311 
312     if (ShiftedFree && (Imm.getZExtValue() & 0xFFFF) == 0)
313       return TTI::TCC_Free;
314   }
315 
316   return PPCTTIImpl::getIntImmCost(Imm, Ty, CostKind);
317 }
318 
319 unsigned
320 PPCTTIImpl::getUserCost(const User *U, ArrayRef<const Value *> Operands,
321                         TTI::TargetCostKind CostKind) {
  // The vector adjustment is already performed in getCastInstrCost and
  // getMemoryOpCost, so defer to the base implementation for casts, loads,
  // and stores.
324   if (isa<CastInst>(U) || isa<LoadInst>(U) || isa<StoreInst>(U))
325     return BaseT::getUserCost(U, Operands, CostKind);
326 
327   if (U->getType()->isVectorTy()) {
328     // Instructions that need to be split should cost more.
329     std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, U->getType());
330     return LT.first * BaseT::getUserCost(U, Operands, CostKind);
331   }
332 
333   return BaseT::getUserCost(U, Operands, CostKind);
334 }
335 
336 bool PPCTTIImpl::mightUseCTR(BasicBlock *BB, TargetLibraryInfo *LibInfo,
337                              SmallPtrSetImpl<const Value *> &Visited) {
338   const PPCTargetMachine &TM = ST->getTargetMachine();
339 
340   // Loop through the inline asm constraints and look for something that
341   // clobbers ctr.
342   auto asmClobbersCTR = [](InlineAsm *IA) {
343     InlineAsm::ConstraintInfoVector CIV = IA->ParseConstraints();
344     for (unsigned i = 0, ie = CIV.size(); i < ie; ++i) {
345       InlineAsm::ConstraintInfo &C = CIV[i];
346       if (C.Type != InlineAsm::isInput)
347         for (unsigned j = 0, je = C.Codes.size(); j < je; ++j)
348           if (StringRef(C.Codes[j]).equals_lower("{ctr}"))
349             return true;
350     }
351     return false;
352   };
353 
354   // Determining the address of a TLS variable results in a function call in
355   // certain TLS models.
356   std::function<bool(const Value *)> memAddrUsesCTR =
357       [&memAddrUsesCTR, &TM, &Visited](const Value *MemAddr) -> bool {
358     // No need to traverse again if we already checked this operand.
359     if (!Visited.insert(MemAddr).second)
360       return false;
361     const auto *GV = dyn_cast<GlobalValue>(MemAddr);
362     if (!GV) {
363       // Recurse to check for constants that refer to TLS global variables.
364       if (const auto *CV = dyn_cast<Constant>(MemAddr))
365         for (const auto &CO : CV->operands())
366           if (memAddrUsesCTR(CO))
367             return true;
368 
369       return false;
370     }
371 
372     if (!GV->isThreadLocal())
373       return false;
374     TLSModel::Model Model = TM.getTLSModel(GV);
375     return Model == TLSModel::GeneralDynamic ||
376       Model == TLSModel::LocalDynamic;
377   };
378 
379   auto isLargeIntegerTy = [](bool Is32Bit, Type *Ty) {
380     if (IntegerType *ITy = dyn_cast<IntegerType>(Ty))
381       return ITy->getBitWidth() > (Is32Bit ? 32U : 64U);
382 
383     return false;
384   };
385 
386   for (BasicBlock::iterator J = BB->begin(), JE = BB->end();
387        J != JE; ++J) {
388     if (CallInst *CI = dyn_cast<CallInst>(J)) {
389       // Inline ASM is okay, unless it clobbers the ctr register.
390       if (InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledOperand())) {
391         if (asmClobbersCTR(IA))
392           return true;
393         continue;
394       }
395 
396       if (Function *F = CI->getCalledFunction()) {
397         // Most intrinsics don't become function calls, but some might.
398         // sin, cos, exp and log are always calls.
399         unsigned Opcode = 0;
400         if (F->getIntrinsicID() != Intrinsic::not_intrinsic) {
401           switch (F->getIntrinsicID()) {
402           default: continue;
403           // If we have a call to loop_decrement or set_loop_iterations,
404           // we're definitely using CTR.
405           case Intrinsic::set_loop_iterations:
406           case Intrinsic::loop_decrement:
407             return true;
408 
          // Binary operations on 128-bit floating-point values become
          // library calls.
410           case Intrinsic::experimental_constrained_fadd:
411           case Intrinsic::experimental_constrained_fsub:
412           case Intrinsic::experimental_constrained_fmul:
413           case Intrinsic::experimental_constrained_fdiv:
414           case Intrinsic::experimental_constrained_frem:
415             if (F->getType()->getScalarType()->isFP128Ty() ||
416                 F->getType()->getScalarType()->isPPC_FP128Ty())
417               return true;
418             break;
419 
420           case Intrinsic::experimental_constrained_fptosi:
421           case Intrinsic::experimental_constrained_fptoui:
422           case Intrinsic::experimental_constrained_sitofp:
423           case Intrinsic::experimental_constrained_uitofp: {
424             Type *SrcType = CI->getArgOperand(0)->getType()->getScalarType();
425             Type *DstType = CI->getType()->getScalarType();
426             if (SrcType->isPPC_FP128Ty() || DstType->isPPC_FP128Ty() ||
427                 isLargeIntegerTy(!TM.isPPC64(), SrcType) ||
428                 isLargeIntegerTy(!TM.isPPC64(), DstType))
429               return true;
430             break;
431           }
432 
          // Exclude eh_sjlj_setjmp; we don't need to exclude eh_sjlj_longjmp
          // because, although it does clobber the counter register, control
          // can't then return to inside the loop unless there is also an
          // eh_sjlj_setjmp.
437           case Intrinsic::eh_sjlj_setjmp:
438 
439           case Intrinsic::memcpy:
440           case Intrinsic::memmove:
441           case Intrinsic::memset:
442           case Intrinsic::powi:
443           case Intrinsic::log:
444           case Intrinsic::log2:
445           case Intrinsic::log10:
446           case Intrinsic::exp:
447           case Intrinsic::exp2:
448           case Intrinsic::pow:
449           case Intrinsic::sin:
450           case Intrinsic::cos:
451           case Intrinsic::experimental_constrained_powi:
452           case Intrinsic::experimental_constrained_log:
453           case Intrinsic::experimental_constrained_log2:
454           case Intrinsic::experimental_constrained_log10:
455           case Intrinsic::experimental_constrained_exp:
456           case Intrinsic::experimental_constrained_exp2:
457           case Intrinsic::experimental_constrained_pow:
458           case Intrinsic::experimental_constrained_sin:
459           case Intrinsic::experimental_constrained_cos:
460             return true;
461           case Intrinsic::copysign:
462             if (CI->getArgOperand(0)->getType()->getScalarType()->
463                 isPPC_FP128Ty())
464               return true;
465             else
466               continue; // ISD::FCOPYSIGN is never a library call.
467           case Intrinsic::fma:                Opcode = ISD::FMA;        break;
468           case Intrinsic::sqrt:               Opcode = ISD::FSQRT;      break;
469           case Intrinsic::floor:              Opcode = ISD::FFLOOR;     break;
470           case Intrinsic::ceil:               Opcode = ISD::FCEIL;      break;
471           case Intrinsic::trunc:              Opcode = ISD::FTRUNC;     break;
472           case Intrinsic::rint:               Opcode = ISD::FRINT;      break;
473           case Intrinsic::lrint:              Opcode = ISD::LRINT;      break;
474           case Intrinsic::llrint:             Opcode = ISD::LLRINT;     break;
475           case Intrinsic::nearbyint:          Opcode = ISD::FNEARBYINT; break;
476           case Intrinsic::round:              Opcode = ISD::FROUND;     break;
477           case Intrinsic::lround:             Opcode = ISD::LROUND;     break;
478           case Intrinsic::llround:            Opcode = ISD::LLROUND;    break;
479           case Intrinsic::minnum:             Opcode = ISD::FMINNUM;    break;
480           case Intrinsic::maxnum:             Opcode = ISD::FMAXNUM;    break;
481           case Intrinsic::experimental_constrained_fcmp:
482             Opcode = ISD::STRICT_FSETCC;
483             break;
484           case Intrinsic::experimental_constrained_fcmps:
485             Opcode = ISD::STRICT_FSETCCS;
486             break;
487           case Intrinsic::experimental_constrained_fma:
488             Opcode = ISD::STRICT_FMA;
489             break;
490           case Intrinsic::experimental_constrained_sqrt:
491             Opcode = ISD::STRICT_FSQRT;
492             break;
493           case Intrinsic::experimental_constrained_floor:
494             Opcode = ISD::STRICT_FFLOOR;
495             break;
496           case Intrinsic::experimental_constrained_ceil:
497             Opcode = ISD::STRICT_FCEIL;
498             break;
499           case Intrinsic::experimental_constrained_trunc:
500             Opcode = ISD::STRICT_FTRUNC;
501             break;
502           case Intrinsic::experimental_constrained_rint:
503             Opcode = ISD::STRICT_FRINT;
504             break;
505           case Intrinsic::experimental_constrained_lrint:
506             Opcode = ISD::STRICT_LRINT;
507             break;
508           case Intrinsic::experimental_constrained_llrint:
509             Opcode = ISD::STRICT_LLRINT;
510             break;
511           case Intrinsic::experimental_constrained_nearbyint:
512             Opcode = ISD::STRICT_FNEARBYINT;
513             break;
514           case Intrinsic::experimental_constrained_round:
515             Opcode = ISD::STRICT_FROUND;
516             break;
517           case Intrinsic::experimental_constrained_lround:
518             Opcode = ISD::STRICT_LROUND;
519             break;
520           case Intrinsic::experimental_constrained_llround:
521             Opcode = ISD::STRICT_LLROUND;
522             break;
523           case Intrinsic::experimental_constrained_minnum:
524             Opcode = ISD::STRICT_FMINNUM;
525             break;
526           case Intrinsic::experimental_constrained_maxnum:
527             Opcode = ISD::STRICT_FMAXNUM;
528             break;
529           case Intrinsic::umul_with_overflow: Opcode = ISD::UMULO;      break;
530           case Intrinsic::smul_with_overflow: Opcode = ISD::SMULO;      break;
531           }
532         }
533 
534         // PowerPC does not use [US]DIVREM or other library calls for
535         // operations on regular types which are not otherwise library calls
536         // (i.e. soft float or atomics). If adapting for targets that do,
537         // additional care is required here.
538 
539         LibFunc Func;
540         if (!F->hasLocalLinkage() && F->hasName() && LibInfo &&
541             LibInfo->getLibFunc(F->getName(), Func) &&
542             LibInfo->hasOptimizedCodeGen(Func)) {
543           // Non-read-only functions are never treated as intrinsics.
544           if (!CI->onlyReadsMemory())
545             return true;
546 
547           // Conversion happens only for FP calls.
548           if (!CI->getArgOperand(0)->getType()->isFloatingPointTy())
549             return true;
550 
551           switch (Func) {
552           default: return true;
553           case LibFunc_copysign:
554           case LibFunc_copysignf:
555             continue; // ISD::FCOPYSIGN is never a library call.
556           case LibFunc_copysignl:
557             return true;
558           case LibFunc_fabs:
559           case LibFunc_fabsf:
560           case LibFunc_fabsl:
561             continue; // ISD::FABS is never a library call.
562           case LibFunc_sqrt:
563           case LibFunc_sqrtf:
564           case LibFunc_sqrtl:
565             Opcode = ISD::FSQRT; break;
566           case LibFunc_floor:
567           case LibFunc_floorf:
568           case LibFunc_floorl:
569             Opcode = ISD::FFLOOR; break;
570           case LibFunc_nearbyint:
571           case LibFunc_nearbyintf:
572           case LibFunc_nearbyintl:
573             Opcode = ISD::FNEARBYINT; break;
574           case LibFunc_ceil:
575           case LibFunc_ceilf:
576           case LibFunc_ceill:
577             Opcode = ISD::FCEIL; break;
578           case LibFunc_rint:
579           case LibFunc_rintf:
580           case LibFunc_rintl:
581             Opcode = ISD::FRINT; break;
582           case LibFunc_round:
583           case LibFunc_roundf:
584           case LibFunc_roundl:
585             Opcode = ISD::FROUND; break;
586           case LibFunc_trunc:
587           case LibFunc_truncf:
588           case LibFunc_truncl:
589             Opcode = ISD::FTRUNC; break;
590           case LibFunc_fmin:
591           case LibFunc_fminf:
592           case LibFunc_fminl:
593             Opcode = ISD::FMINNUM; break;
594           case LibFunc_fmax:
595           case LibFunc_fmaxf:
596           case LibFunc_fmaxl:
597             Opcode = ISD::FMAXNUM; break;
598           }
599         }
600 
601         if (Opcode) {
602           EVT EVTy =
603               TLI->getValueType(DL, CI->getArgOperand(0)->getType(), true);
604 
605           if (EVTy == MVT::Other)
606             return true;
607 
608           if (TLI->isOperationLegalOrCustom(Opcode, EVTy))
609             continue;
610           else if (EVTy.isVector() &&
611                    TLI->isOperationLegalOrCustom(Opcode, EVTy.getScalarType()))
612             continue;
613 
614           return true;
615         }
616       }
617 
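      // Conservatively assume any other (e.g. indirect) call clobbers the CTR.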
618       return true;
619     } else if (isa<BinaryOperator>(J) &&
620                (J->getType()->getScalarType()->isFP128Ty() ||
621                 J->getType()->getScalarType()->isPPC_FP128Ty())) {
      // Most operations on f128 or ppc_fp128 values become calls.
623       return true;
624     } else if (isa<UIToFPInst>(J) || isa<SIToFPInst>(J) ||
625                isa<FPToUIInst>(J) || isa<FPToSIInst>(J)) {
626       CastInst *CI = cast<CastInst>(J);
627       if (CI->getSrcTy()->getScalarType()->isPPC_FP128Ty() ||
628           CI->getDestTy()->getScalarType()->isPPC_FP128Ty() ||
629           isLargeIntegerTy(!TM.isPPC64(), CI->getSrcTy()->getScalarType()) ||
630           isLargeIntegerTy(!TM.isPPC64(), CI->getDestTy()->getScalarType()))
631         return true;
632     } else if (isLargeIntegerTy(!TM.isPPC64(),
633                                 J->getType()->getScalarType()) &&
634                (J->getOpcode() == Instruction::UDiv ||
635                 J->getOpcode() == Instruction::SDiv ||
636                 J->getOpcode() == Instruction::URem ||
637                 J->getOpcode() == Instruction::SRem)) {
638       return true;
639     } else if (!TM.isPPC64() &&
640                isLargeIntegerTy(false, J->getType()->getScalarType()) &&
641                (J->getOpcode() == Instruction::Shl ||
642                 J->getOpcode() == Instruction::AShr ||
643                 J->getOpcode() == Instruction::LShr)) {
644       // Only on PPC32, for 128-bit integers (specifically not 64-bit
645       // integers), these might be runtime calls.
646       return true;
647     } else if (isa<IndirectBrInst>(J) || isa<InvokeInst>(J)) {
648       // On PowerPC, indirect jumps use the counter register.
649       return true;
650     } else if (SwitchInst *SI = dyn_cast<SwitchInst>(J)) {
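      // Switches lowered to a jump table use an indirect branch, and thus
      // the counter register.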
651       if (SI->getNumCases() + 1 >= (unsigned)TLI->getMinimumJumpTableEntries())
652         return true;
653     }
654 
655     // FREM is always a call.
656     if (J->getOpcode() == Instruction::FRem)
657       return true;
658 
659     if (ST->useSoftFloat()) {
660       switch(J->getOpcode()) {
661       case Instruction::FAdd:
662       case Instruction::FSub:
663       case Instruction::FMul:
664       case Instruction::FDiv:
665       case Instruction::FPTrunc:
666       case Instruction::FPExt:
667       case Instruction::FPToUI:
668       case Instruction::FPToSI:
669       case Instruction::UIToFP:
670       case Instruction::SIToFP:
671       case Instruction::FCmp:
672         return true;
673       }
674     }
675 
676     for (Value *Operand : J->operands())
677       if (memAddrUsesCTR(Operand))
678         return true;
679   }
680 
681   return false;
682 }
683 
684 bool PPCTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
685                                           AssumptionCache &AC,
686                                           TargetLibraryInfo *LibInfo,
687                                           HardwareLoopInfo &HWLoopInfo) {
688   const PPCTargetMachine &TM = ST->getTargetMachine();
689   TargetSchedModel SchedModel;
690   SchedModel.init(ST);
691 
  // Do not convert loops with a small constant trip count to CTR loops.
693   unsigned ConstTripCount = SE.getSmallConstantTripCount(L);
694   if (ConstTripCount && ConstTripCount < SmallCTRLoopThreshold) {
695     SmallPtrSet<const Value *, 32> EphValues;
696     CodeMetrics::collectEphemeralValues(L, &AC, EphValues);
697     CodeMetrics Metrics;
698     for (BasicBlock *BB : L->blocks())
699       Metrics.analyzeBasicBlock(BB, *this, EphValues);
700     // 6 is an approximate latency for the mtctr instruction.
701     if (Metrics.NumInsts <= (6 * SchedModel.getIssueWidth()))
702       return false;
703   }
704 
705   // We don't want to spill/restore the counter register, and so we don't
706   // want to use the counter register if the loop contains calls.
707   SmallPtrSet<const Value *, 4> Visited;
708   for (Loop::block_iterator I = L->block_begin(), IE = L->block_end();
709        I != IE; ++I)
710     if (mightUseCTR(*I, LibInfo, Visited))
711       return false;
712 
713   SmallVector<BasicBlock*, 4> ExitingBlocks;
714   L->getExitingBlocks(ExitingBlocks);
715 
716   // If there is an exit edge known to be frequently taken,
717   // we should not transform this loop.
718   for (auto &BB : ExitingBlocks) {
719     Instruction *TI = BB->getTerminator();
720     if (!TI) continue;
721 
722     if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
723       uint64_t TrueWeight = 0, FalseWeight = 0;
724       if (!BI->isConditional() ||
725           !BI->extractProfMetadata(TrueWeight, FalseWeight))
726         continue;
727 
728       // If the exit path is more frequent than the loop path,
729       // we return here without further analysis for this loop.
730       bool TrueIsExit = !L->contains(BI->getSuccessor(0));
731       if (( TrueIsExit && FalseWeight < TrueWeight) ||
732           (!TrueIsExit && FalseWeight > TrueWeight))
733         return false;
734     }
735   }
736 
737   LLVMContext &C = L->getHeader()->getContext();
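  // The CTR is GPR-width, so size the count type to match the target.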
738   HWLoopInfo.CountType = TM.isPPC64() ?
739     Type::getInt64Ty(C) : Type::getInt32Ty(C);
740   HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1);
741   return true;
742 }
743 
744 void PPCTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
745                                          TTI::UnrollingPreferences &UP) {
746   if (ST->getCPUDirective() == PPC::DIR_A2) {
747     // The A2 is in-order with a deep pipeline, and concatenation unrolling
748     // helps expose latency-hiding opportunities to the instruction scheduler.
749     UP.Partial = UP.Runtime = true;
750 
751     // We unroll a lot on the A2 (hundreds of instructions), and the benefits
752     // often outweigh the cost of a division to compute the trip count.
753     UP.AllowExpensiveTripCount = true;
754   }
755 
756   BaseT::getUnrollingPreferences(L, SE, UP);
757 }
758 
759 void PPCTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
760                                        TTI::PeelingPreferences &PP) {
761   BaseT::getPeelingPreferences(L, SE, PP);
762 }
// Returning true allows the coldcc calling convention to be used for functions
// that are cold at all call sites, provided that their callers do not call any
// other non-coldcc functions.
767 bool PPCTTIImpl::useColdCCForColdCall(Function &F) {
768   return EnablePPCColdCC;
769 }
770 
771 bool PPCTTIImpl::enableAggressiveInterleaving(bool LoopHasReductions) {
  // On the A2, always interleave aggressively.
773   if (ST->getCPUDirective() == PPC::DIR_A2)
774     return true;
775 
776   return LoopHasReductions;
777 }
778 
779 PPCTTIImpl::TTI::MemCmpExpansionOptions
780 PPCTTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
781   TTI::MemCmpExpansionOptions Options;
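  // Try the widest loads first when expanding memcmp inline.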
782   Options.LoadSizes = {8, 4, 2, 1};
783   Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
784   return Options;
785 }
786 
787 bool PPCTTIImpl::enableInterleavedAccessVectorization() {
788   return true;
789 }
790 
791 unsigned PPCTTIImpl::getNumberOfRegisters(unsigned ClassID) const {
792   assert(ClassID == GPRRC || ClassID == FPRRC ||
793          ClassID == VRRC || ClassID == VSXRC);
794   if (ST->hasVSX()) {
795     assert(ClassID == GPRRC || ClassID == VSXRC || ClassID == VRRC);
796     return ClassID == VSXRC ? 64 : 32;
797   }
798   assert(ClassID == GPRRC || ClassID == FPRRC || ClassID == VRRC);
799   return 32;
800 }
801 
802 unsigned PPCTTIImpl::getRegisterClassForType(bool Vector, Type *Ty) const {
803   if (Vector)
804     return ST->hasVSX() ? VSXRC : VRRC;
805   else if (Ty && (Ty->getScalarType()->isFloatTy() ||
806                   Ty->getScalarType()->isDoubleTy()))
807     return ST->hasVSX() ? VSXRC : FPRRC;
808   else if (Ty && (Ty->getScalarType()->isFP128Ty() ||
809                   Ty->getScalarType()->isPPC_FP128Ty()))
810     return VRRC;
811   else if (Ty && Ty->getScalarType()->isHalfTy())
812     return VSXRC;
813   else
814     return GPRRC;
815 }
816 
const char* PPCTTIImpl::getRegisterClassName(unsigned ClassID) const {
819   switch (ClassID) {
820     default:
821       llvm_unreachable("unknown register class");
822       return "PPC::unknown register class";
823     case GPRRC:       return "PPC::GPRRC";
824     case FPRRC:       return "PPC::FPRRC";
825     case VRRC:        return "PPC::VRRC";
826     case VSXRC:       return "PPC::VSXRC";
827   }
828 }
829 
830 unsigned PPCTTIImpl::getRegisterBitWidth(bool Vector) const {
831   if (Vector) {
832     if (ST->hasAltivec()) return 128;
833     return 0;
834   }
835 
  if (ST->isPPC64())
    return 64;
  return 32;
}
841 
842 unsigned PPCTTIImpl::getCacheLineSize() const {
843   // Check first if the user specified a custom line size.
844   if (CacheLineSize.getNumOccurrences() > 0)
845     return CacheLineSize;
846 
847   // Starting with P7 we have a cache line size of 128.
848   unsigned Directive = ST->getCPUDirective();
849   // Assume that Future CPU has the same cache line size as the others.
850   if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
851       Directive == PPC::DIR_PWR9 || Directive == PPC::DIR_PWR10 ||
852       Directive == PPC::DIR_PWR_FUTURE)
853     return 128;
854 
855   // On other processors return a default of 64 bytes.
856   return 64;
857 }
858 
859 unsigned PPCTTIImpl::getPrefetchDistance() const {
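  // Heuristic: prefetch roughly 300 instructions ahead of the access.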
860   return 300;
861 }
862 
863 unsigned PPCTTIImpl::getMaxInterleaveFactor(unsigned VF) {
864   unsigned Directive = ST->getCPUDirective();
865   // The 440 has no SIMD support, but floating-point instructions
866   // have a 5-cycle latency, so unroll by 5x for latency hiding.
867   if (Directive == PPC::DIR_440)
868     return 5;
869 
870   // The A2 has no SIMD support, but floating-point instructions
871   // have a 6-cycle latency, so unroll by 6x for latency hiding.
872   if (Directive == PPC::DIR_A2)
873     return 6;
874 
875   // FIXME: For lack of any better information, do no harm...
876   if (Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500)
877     return 1;
878 
879   // For P7 and P8, floating-point instructions have a 6-cycle latency and
880   // there are two execution units, so unroll by 12x for latency hiding.
881   // FIXME: the same for P9 as previous gen until POWER9 scheduling is ready
882   // FIXME: the same for P10 as previous gen until POWER10 scheduling is ready
883   // Assume that future is the same as the others.
884   if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
885       Directive == PPC::DIR_PWR9 || Directive == PPC::DIR_PWR10 ||
886       Directive == PPC::DIR_PWR_FUTURE)
887     return 12;
888 
889   // For most things, modern systems have two execution units (and
890   // out-of-order execution).
891   return 2;
892 }
893 
// Adjust the cost of vector instructions on targets where there is overlap
// between the vector and scalar units, thereby reducing the overall throughput
// of vector code with respect to scalar code.
897 int PPCTTIImpl::vectorCostAdjustment(int Cost, unsigned Opcode, Type *Ty1,
898                                      Type *Ty2) {
899   if (!ST->vectorsUseTwoUnits() || !Ty1->isVectorTy())
900     return Cost;
901 
902   std::pair<int, MVT> LT1 = TLI->getTypeLegalizationCost(DL, Ty1);
  // If type legalization involves splitting the vector, we don't want to
  // double the cost at every step, only at the last step.
905   if (LT1.first != 1 || !LT1.second.isVector())
906     return Cost;
907 
908   int ISD = TLI->InstructionOpcodeToISD(Opcode);
909   if (TLI->isOperationExpand(ISD, LT1.second))
910     return Cost;
911 
912   if (Ty2) {
913     std::pair<int, MVT> LT2 = TLI->getTypeLegalizationCost(DL, Ty2);
914     if (LT2.first != 1 || !LT2.second.isVector())
915       return Cost;
916   }
917 
918   return Cost * 2;
919 }
920 
921 int PPCTTIImpl::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
922                                        TTI::TargetCostKind CostKind,
923                                        TTI::OperandValueKind Op1Info,
924                                        TTI::OperandValueKind Op2Info,
925                                        TTI::OperandValueProperties Opd1PropInfo,
926                                        TTI::OperandValueProperties Opd2PropInfo,
927                                        ArrayRef<const Value *> Args,
928                                        const Instruction *CxtI) {
929   assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");
930   // TODO: Handle more cost kinds.
931   if (CostKind != TTI::TCK_RecipThroughput)
932     return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
933                                          Op2Info, Opd1PropInfo,
934                                          Opd2PropInfo, Args, CxtI);
935 
  // Fall back to the default implementation.
937   int Cost = BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
938                                            Op2Info,
939                                            Opd1PropInfo, Opd2PropInfo);
940   return vectorCostAdjustment(Cost, Opcode, Ty, nullptr);
941 }
942 
943 int PPCTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
944                                Type *SubTp) {
945   // Legalize the type.
946   std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
947 
  // PPC, for both Altivec and VSX, supports cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). We need one such shuffle instruction for each actual
  // register (this is not true for arbitrary shuffles, but is true for the
  // structured types of shuffles covered by TTI::ShuffleKind).
953   return vectorCostAdjustment(LT.first, Instruction::ShuffleVector, Tp,
954                               nullptr);
955 }
956 
int PPCTTIImpl::getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind) {
  if (CostKind != TTI::TCK_RecipThroughput)
    return Opcode == Instruction::PHI ? 0 : 1;
  // Branches are assumed to be predicted, so they are free under the
  // reciprocal-throughput model.
  return 0;
}
963 
964 int PPCTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
965                                  TTI::CastContextHint CCH,
966                                  TTI::TargetCostKind CostKind,
967                                  const Instruction *I) {
968   assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");
969 
970   int Cost = BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
971   Cost = vectorCostAdjustment(Cost, Opcode, Dst, Src);
972   // TODO: Allow non-throughput costs that aren't binary.
973   if (CostKind != TTI::TCK_RecipThroughput)
974     return Cost == 0 ? 0 : 1;
975   return Cost;
976 }
977 
978 int PPCTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
979                                    TTI::TargetCostKind CostKind,
980                                    const Instruction *I) {
981   int Cost = BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, CostKind, I);
982   // TODO: Handle other cost kinds.
983   if (CostKind != TTI::TCK_RecipThroughput)
984     return Cost;
985   return vectorCostAdjustment(Cost, Opcode, ValTy, nullptr);
986 }
987 
988 int PPCTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
989   assert(Val->isVectorTy() && "This must be a vector type");
990 
991   int ISD = TLI->InstructionOpcodeToISD(Opcode);
992   assert(ISD && "Invalid opcode");
993 
994   int Cost = BaseT::getVectorInstrCost(Opcode, Val, Index);
995   Cost = vectorCostAdjustment(Cost, Opcode, Val, nullptr);
996 
997   if (ST->hasVSX() && Val->getScalarType()->isDoubleTy()) {
998     // Double-precision scalars are already located in index #0 (or #1 if LE).
999     if (ISD == ISD::EXTRACT_VECTOR_ELT &&
1000         Index == (ST->isLittleEndian() ? 1 : 0))
1001       return 0;
1002 
1003     return Cost;
1004 
1005   } else if (Val->getScalarType()->isIntegerTy() && Index != -1U) {
1006     if (ST->hasP9Altivec()) {
1007       if (ISD == ISD::INSERT_VECTOR_ELT)
1008         // A move-to VSR and a permute/insert.  Assume vector operation cost
1009         // for both (cost will be 2x on P9).
1010         return vectorCostAdjustment(2, Opcode, Val, nullptr);
1011 
1012       // It's an extract.  Maybe we can do a cheap move-from VSR.
1013       unsigned EltSize = Val->getScalarSizeInBits();
1014       if (EltSize == 64) {
1015         unsigned MfvsrdIndex = ST->isLittleEndian() ? 1 : 0;
1016         if (Index == MfvsrdIndex)
1017           return 1;
1018       } else if (EltSize == 32) {
1019         unsigned MfvsrwzIndex = ST->isLittleEndian() ? 2 : 1;
1020         if (Index == MfvsrwzIndex)
1021           return 1;
1022       }
1023 
1024       // We need a vector extract (or mfvsrld).  Assume vector operation cost.
1025       // The cost of the load constant for a vector extract is disregarded
1026       // (invariant, easily schedulable).
1027       return vectorCostAdjustment(1, Opcode, Val, nullptr);
1028 
1029     } else if (ST->hasDirectMove())
1030       // Assume permute has standard cost.
1031       // Assume move-to/move-from VSR have 2x standard cost.
1032       return 3;
1033   }
1034 
1035   // Estimated cost of a load-hit-store delay.  This was obtained
1036   // experimentally as a minimum needed to prevent unprofitable
1037   // vectorization for the paq8p benchmark.  It may need to be
1038   // raised further if other unprofitable cases remain.
1039   unsigned LHSPenalty = 2;
1040   if (ISD == ISD::INSERT_VECTOR_ELT)
1041     LHSPenalty += 7;
1042 
1043   // Vector element insert/extract with Altivec is very expensive,
1044   // because they require store and reload with the attendant
1045   // processor stall for load-hit-store.  Until VSX is available,
1046   // these need to be estimated as very costly.
1047   if (ISD == ISD::EXTRACT_VECTOR_ELT ||
1048       ISD == ISD::INSERT_VECTOR_ELT)
1049     return LHSPenalty + Cost;
1050 
1051   return Cost;
1052 }
1053 
1054 int PPCTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
1055                                 MaybeAlign Alignment, unsigned AddressSpace,
1056                                 TTI::TargetCostKind CostKind,
1057                                 const Instruction *I) {
  if (TLI->getValueType(DL, Src, true) == MVT::Other)
1059     return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
1060                                   CostKind);
1061   // Legalize the type.
1062   std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
1063   assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
1064          "Invalid Opcode");
1065 
1066   int Cost = BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
1067                                     CostKind);
1068   // TODO: Handle other cost kinds.
1069   if (CostKind != TTI::TCK_RecipThroughput)
1070     return Cost;
1071 
1072   Cost = vectorCostAdjustment(Cost, Opcode, Src, nullptr);
1073 
1074   bool IsAltivecType = ST->hasAltivec() &&
1075                        (LT.second == MVT::v16i8 || LT.second == MVT::v8i16 ||
1076                         LT.second == MVT::v4i32 || LT.second == MVT::v4f32);
1077   bool IsVSXType = ST->hasVSX() &&
1078                    (LT.second == MVT::v2f64 || LT.second == MVT::v2i64);
1079 
  // VSX has 32-bit and 64-bit load instructions. Legalization can handle
  // loading 32-bit and 64-bit values into a VSR correctly and cheaply, but
  // BaseT::getMemoryOpCost and PPCTargetLowering can't compute the cost
  // appropriately, so we explicitly check this case here.
  unsigned MemBits = Src->getPrimitiveSizeInBits();
  if (Opcode == Instruction::Load && ST->hasVSX() && IsAltivecType &&
      (MemBits == 64 || (ST->hasP8Vector() && MemBits == 32)))
1087     return 1;
1088 
1089   // Aligned loads and stores are easy.
1090   unsigned SrcBytes = LT.second.getStoreSize();
1091   if (!SrcBytes || !Alignment || *Alignment >= SrcBytes)
1092     return Cost;
1093 
1094   // If we can use the permutation-based load sequence, then this is also
1095   // relatively cheap (not counting loop-invariant instructions): one load plus
1096   // one permute (the last load in a series has extra cost, but we're
1097   // neglecting that here). Note that on the P7, we could do unaligned loads
1098   // for Altivec types using the VSX instructions, but that's more expensive
1099   // than using the permutation-based load sequence. On the P8, that's no
1100   // longer true.
1101   if (Opcode == Instruction::Load && (!ST->hasP8Vector() && IsAltivecType) &&
1102       *Alignment >= LT.second.getScalarType().getStoreSize())
1103     return Cost + LT.first; // Add the cost of the permutations.
1104 
1105   // For VSX, we can do unaligned loads and stores on Altivec/VSX types. On the
1106   // P7, unaligned vector loads are more expensive than the permutation-based
1107   // load sequence, so that might be used instead, but regardless, the net cost
1108   // is about the same (not counting loop-invariant instructions).
1109   if (IsVSXType || (ST->hasVSX() && IsAltivecType))
1110     return Cost;
1111 
  // Newer PPC cores support unaligned memory accesses.
1113   if (TLI->allowsMisalignedMemoryAccesses(LT.second, 0))
1114     return Cost;
1115 
1116   // PPC in general does not support unaligned loads and stores. They'll need
1117   // to be decomposed based on the alignment factor.
1118 
1119   // Add the cost of each scalar load or store.
1120   assert(Alignment);
1121   Cost += LT.first * ((SrcBytes / Alignment->value()) - 1);
1122 
1123   // For a vector type, there is also scalarization overhead (only for
1124   // stores, loads are expanded using the vector-load + permutation sequence,
1125   // which is much less expensive).
1126   if (Src->isVectorTy() && Opcode == Instruction::Store)
1127     for (int i = 0, e = cast<FixedVectorType>(Src)->getNumElements(); i < e;
1128          ++i)
1129       Cost += getVectorInstrCost(Instruction::ExtractElement, Src, i);
1130 
1131   return Cost;
1132 }
1133 
1134 int PPCTTIImpl::getInterleavedMemoryOpCost(
1135     unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1136     Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
1137     bool UseMaskForCond, bool UseMaskForGaps) {
1138   if (UseMaskForCond || UseMaskForGaps)
1139     return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
1140                                              Alignment, AddressSpace, CostKind,
1141                                              UseMaskForCond, UseMaskForGaps);
1142 
1143   assert(isa<VectorType>(VecTy) &&
1144          "Expect a vector type for interleaved memory op");
1145 
1146   // Legalize the type.
1147   std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, VecTy);
1148 
  // First, compute the cost of the load/store operation itself.
1150   int Cost =
1151       getMemoryOpCost(Opcode, VecTy, MaybeAlign(Alignment), AddressSpace,
1152                       CostKind);
1153 
  // PPC, for both Altivec and VSX, supports cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). For each result vector, we need one shuffle per incoming
  // vector (except that the first shuffle can take two incoming vectors
  // because it does not need to take itself).
  Cost += Factor * (LT.first - 1);
1160 
1161   return Cost;
1162 }
1163 
1164 unsigned PPCTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
1165                                            TTI::TargetCostKind CostKind) {
1166   return BaseT::getIntrinsicInstrCost(ICA, CostKind);
1167 }
1168 
1169 bool PPCTTIImpl::canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE,
1170                             LoopInfo *LI, DominatorTree *DT,
1171                             AssumptionCache *AC, TargetLibraryInfo *LibInfo) {
1172   // Process nested loops first.
1173   for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
1174     if (canSaveCmp(*I, BI, SE, LI, DT, AC, LibInfo))
1175       return false; // Stop search.
1176 
1177   HardwareLoopInfo HWLoopInfo(L);
1178 
1179   if (!HWLoopInfo.canAnalyze(*LI))
1180     return false;
1181 
1182   if (!isHardwareLoopProfitable(L, *SE, *AC, LibInfo, HWLoopInfo))
1183     return false;
1184 
1185   if (!HWLoopInfo.isHardwareLoopCandidate(*SE, *LI, *DT))
1186     return false;
1187 
1188   *BI = HWLoopInfo.ExitBranch;
1189   return true;
1190 }
1191 
1192 bool PPCTTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1,
1193                                TargetTransformInfo::LSRCost &C2) {
  // The PowerPC default gives first priority to the instruction count.
  // If LsrNoInsnsCost is set, fall back to the default implementation.
1196   if (!LsrNoInsnsCost)
1197     return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost, C1.NumIVMuls,
1198                     C1.NumBaseAdds, C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
1199            std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost, C2.NumIVMuls,
1200                     C2.NumBaseAdds, C2.ScaleCost, C2.ImmCost, C2.SetupCost);
1201   else
1202     return TargetTransformInfoImplBase::isLSRCostLess(C1, C2);
1203 }
1204