1 //===-- PPCTargetTransformInfo.cpp - PPC specific TTI ---------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "PPCTargetTransformInfo.h"
10 #include "llvm/Analysis/CodeMetrics.h"
11 #include "llvm/Analysis/TargetTransformInfo.h"
12 #include "llvm/CodeGen/BasicTTIImpl.h"
13 #include "llvm/CodeGen/CostTable.h"
14 #include "llvm/CodeGen/TargetLowering.h"
15 #include "llvm/CodeGen/TargetSchedule.h"
16 #include "llvm/Support/CommandLine.h"
17 #include "llvm/Support/Debug.h"
18 using namespace llvm;
19 
20 #define DEBUG_TYPE "ppctti"
21 
22 static cl::opt<bool> DisablePPCConstHoist("disable-ppc-constant-hoisting",
23 cl::desc("disable constant hoisting on PPC"), cl::init(false), cl::Hidden);
24 
// This is currently only used for the data prefetch pass, which is only
// enabled for the BG/Q by default.
27 static cl::opt<unsigned>
28 CacheLineSize("ppc-loop-prefetch-cache-line", cl::Hidden, cl::init(64),
29               cl::desc("The loop prefetch cache line size"));
30 
31 static cl::opt<bool>
32 EnablePPCColdCC("ppc-enable-coldcc", cl::Hidden, cl::init(false),
33                 cl::desc("Enable using coldcc calling conv for cold "
34                          "internal functions"));
35 
36 // The latency of mtctr is only justified if there are more than 4
37 // comparisons that will be removed as a result.
38 static cl::opt<unsigned>
39 SmallCTRLoopThreshold("min-ctr-loop-threshold", cl::init(4), cl::Hidden,
40                       cl::desc("Loops with a constant trip count smaller than "
41                                "this value will not use the count register."));
42 
43 //===----------------------------------------------------------------------===//
44 //
45 // PPC cost model.
46 //
47 //===----------------------------------------------------------------------===//
48 
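// Report whether a hardware population-count instruction is available for
// types up to 64 bits wide and, if so, whether it is fast or slow; otherwise
// a software expansion must be used.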
49 TargetTransformInfo::PopcntSupportKind
50 PPCTTIImpl::getPopcntSupport(unsigned TyWidth) {
51   assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
52   if (ST->hasPOPCNTD() != PPCSubtarget::POPCNTD_Unavailable && TyWidth <= 64)
53     return ST->hasPOPCNTD() == PPCSubtarget::POPCNTD_Slow ?
54              TTI::PSK_SlowHardware : TTI::PSK_FastHardware;
55   return TTI::PSK_Software;
56 }
57 
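// Estimate the cost of materializing an integer immediate, in units of
// TTI::TCC_Basic.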
58 int PPCTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
59   if (DisablePPCConstHoist)
60     return BaseT::getIntImmCost(Imm, Ty);
61 
62   assert(Ty->isIntegerTy());
63 
64   unsigned BitSize = Ty->getPrimitiveSizeInBits();
65   if (BitSize == 0)
66     return ~0U;
67 
68   if (Imm == 0)
69     return TTI::TCC_Free;
70 
71   if (Imm.getBitWidth() <= 64) {
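    // Constants that fit in a signed 16-bit field can be materialized with a
    // single li/addi.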
72     if (isInt<16>(Imm.getSExtValue()))
73       return TTI::TCC_Basic;
74 
75     if (isInt<32>(Imm.getSExtValue())) {
76       // A constant that can be materialized using lis.
77       if ((Imm.getZExtValue() & 0xFFFF) == 0)
78         return TTI::TCC_Basic;
79 
80       return 2 * TTI::TCC_Basic;
81     }
82   }
83 
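  // Wider constants require a multi-instruction materialization sequence.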
84   return 4 * TTI::TCC_Basic;
85 }
86 
87 int PPCTTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
88                               Type *Ty) {
89   if (DisablePPCConstHoist)
90     return BaseT::getIntImmCost(IID, Idx, Imm, Ty);
91 
92   assert(Ty->isIntegerTy());
93 
94   unsigned BitSize = Ty->getPrimitiveSizeInBits();
95   if (BitSize == 0)
96     return ~0U;
97 
98   switch (IID) {
99   default:
100     return TTI::TCC_Free;
101   case Intrinsic::sadd_with_overflow:
102   case Intrinsic::uadd_with_overflow:
103   case Intrinsic::ssub_with_overflow:
104   case Intrinsic::usub_with_overflow:
105     if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<16>(Imm.getSExtValue()))
106       return TTI::TCC_Free;
107     break;
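  // Immediates passed to stackmaps and patchpoints are not materialized as
  // ordinary code, so treat them as free.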
108   case Intrinsic::experimental_stackmap:
109     if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
110       return TTI::TCC_Free;
111     break;
112   case Intrinsic::experimental_patchpoint_void:
113   case Intrinsic::experimental_patchpoint_i64:
114     if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
115       return TTI::TCC_Free;
116     break;
117   }
118   return PPCTTIImpl::getIntImmCost(Imm, Ty);
119 }
120 
121 int PPCTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
122                               Type *Ty) {
123   if (DisablePPCConstHoist)
124     return BaseT::getIntImmCost(Opcode, Idx, Imm, Ty);
125 
126   assert(Ty->isIntegerTy());
127 
128   unsigned BitSize = Ty->getPrimitiveSizeInBits();
129   if (BitSize == 0)
130     return ~0U;
131 
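  // ShiftedFree: an immediate whose low 16 bits are zero folds into the
  // shifted-immediate forms (addis/oris/xoris/andis). RunFree: a contiguous
  // run of set bits (or its complement) can be handled by the rotate-and-mask
  // instructions. UnsignedFree: a 16-bit unsigned immediate is directly
  // encodable (e.g. in cmplwi). ZeroFree: a zero operand needs no materialized
  // constant.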
132   unsigned ImmIdx = ~0U;
133   bool ShiftedFree = false, RunFree = false, UnsignedFree = false,
134        ZeroFree = false;
135   switch (Opcode) {
136   default:
137     return TTI::TCC_Free;
138   case Instruction::GetElementPtr:
139     // Always hoist the base address of a GetElementPtr. This prevents the
140     // creation of new constants for every base constant that gets constant
141     // folded with the offset.
142     if (Idx == 0)
143       return 2 * TTI::TCC_Basic;
144     return TTI::TCC_Free;
145   case Instruction::And:
146     RunFree = true; // (for the rotate-and-mask instructions)
147     LLVM_FALLTHROUGH;
148   case Instruction::Add:
149   case Instruction::Or:
150   case Instruction::Xor:
151     ShiftedFree = true;
152     LLVM_FALLTHROUGH;
153   case Instruction::Sub:
154   case Instruction::Mul:
155   case Instruction::Shl:
156   case Instruction::LShr:
157   case Instruction::AShr:
158     ImmIdx = 1;
159     break;
160   case Instruction::ICmp:
161     UnsignedFree = true;
162     ImmIdx = 1;
163     // Zero comparisons can use record-form instructions.
164     LLVM_FALLTHROUGH;
165   case Instruction::Select:
166     ZeroFree = true;
167     break;
168   case Instruction::PHI:
169   case Instruction::Call:
170   case Instruction::Ret:
171   case Instruction::Load:
172   case Instruction::Store:
173     break;
174   }
175 
176   if (ZeroFree && Imm == 0)
177     return TTI::TCC_Free;
178 
179   if (Idx == ImmIdx && Imm.getBitWidth() <= 64) {
180     if (isInt<16>(Imm.getSExtValue()))
181       return TTI::TCC_Free;
182 
183     if (RunFree) {
184       if (Imm.getBitWidth() <= 32 &&
185           (isShiftedMask_32(Imm.getZExtValue()) ||
186            isShiftedMask_32(~Imm.getZExtValue())))
187         return TTI::TCC_Free;
188 
189       if (ST->isPPC64() &&
190           (isShiftedMask_64(Imm.getZExtValue()) ||
191            isShiftedMask_64(~Imm.getZExtValue())))
192         return TTI::TCC_Free;
193     }
194 
195     if (UnsignedFree && isUInt<16>(Imm.getZExtValue()))
196       return TTI::TCC_Free;
197 
198     if (ShiftedFree && (Imm.getZExtValue() & 0xFFFF) == 0)
199       return TTI::TCC_Free;
200   }
201 
202   return PPCTTIImpl::getIntImmCost(Imm, Ty);
203 }
204 
205 unsigned PPCTTIImpl::getUserCost(const User *U,
206                                  ArrayRef<const Value *> Operands) {
207   if (U->getType()->isVectorTy()) {
208     // Instructions that need to be split should cost more.
209     std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, U->getType());
210     return LT.first * BaseT::getUserCost(U, Operands);
211   }
212 
213   return BaseT::getUserCost(U, Operands);
214 }
215 
216 bool PPCTTIImpl::mightUseCTR(BasicBlock *BB,
217                              TargetLibraryInfo *LibInfo) {
218   const PPCTargetMachine &TM = ST->getTargetMachine();
219 
220   // Loop through the inline asm constraints and look for something that
221   // clobbers ctr.
222   auto asmClobbersCTR = [](InlineAsm *IA) {
223     InlineAsm::ConstraintInfoVector CIV = IA->ParseConstraints();
224     for (unsigned i = 0, ie = CIV.size(); i < ie; ++i) {
225       InlineAsm::ConstraintInfo &C = CIV[i];
226       if (C.Type != InlineAsm::isInput)
227         for (unsigned j = 0, je = C.Codes.size(); j < je; ++j)
228           if (StringRef(C.Codes[j]).equals_lower("{ctr}"))
229             return true;
230     }
231     return false;
232   };
233 
234   // Determining the address of a TLS variable results in a function call in
235   // certain TLS models.
236   std::function<bool(const Value*)> memAddrUsesCTR =
237     [&memAddrUsesCTR, &TM](const Value *MemAddr) -> bool {
238     const auto *GV = dyn_cast<GlobalValue>(MemAddr);
239     if (!GV) {
240       // Recurse to check for constants that refer to TLS global variables.
241       if (const auto *CV = dyn_cast<Constant>(MemAddr))
242         for (const auto &CO : CV->operands())
243           if (memAddrUsesCTR(CO))
244             return true;
245 
246       return false;
247     }
248 
249     if (!GV->isThreadLocal())
250       return false;
251     TLSModel::Model Model = TM.getTLSModel(GV);
252     return Model == TLSModel::GeneralDynamic ||
253       Model == TLSModel::LocalDynamic;
254   };
255 
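  // Operations on integer types wider than the native register width are
  // often lowered to runtime library calls.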
256   auto isLargeIntegerTy = [](bool Is32Bit, Type *Ty) {
257     if (IntegerType *ITy = dyn_cast<IntegerType>(Ty))
258       return ITy->getBitWidth() > (Is32Bit ? 32U : 64U);
259 
260     return false;
261   };
262 
263   for (BasicBlock::iterator J = BB->begin(), JE = BB->end();
264        J != JE; ++J) {
265     if (CallInst *CI = dyn_cast<CallInst>(J)) {
266       // Inline ASM is okay, unless it clobbers the ctr register.
267       if (InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue())) {
268         if (asmClobbersCTR(IA))
269           return true;
270         continue;
271       }
272 
273       if (Function *F = CI->getCalledFunction()) {
274         // Most intrinsics don't become function calls, but some might.
275         // sin, cos, exp and log are always calls.
276         unsigned Opcode = 0;
277         if (F->getIntrinsicID() != Intrinsic::not_intrinsic) {
278           switch (F->getIntrinsicID()) {
279           default: continue;
          // If we have a call to the hardware-loop intrinsics that set or
          // decrement the loop counter, we're definitely using CTR.
282           case Intrinsic::set_loop_iterations:
283           case Intrinsic::loop_decrement:
284             return true;
285 
286 // VisualStudio defines setjmp as _setjmp
287 #if defined(_MSC_VER) && defined(setjmp) && \
288                        !defined(setjmp_undefined_for_msvc)
289 #  pragma push_macro("setjmp")
290 #  undef setjmp
291 #  define setjmp_undefined_for_msvc
292 #endif
293 
294           case Intrinsic::setjmp:
295 
296 #if defined(_MSC_VER) && defined(setjmp_undefined_for_msvc)
297  // let's return it to _setjmp state
298 #  pragma pop_macro("setjmp")
299 #  undef setjmp_undefined_for_msvc
300 #endif
301 
302           case Intrinsic::longjmp:
303 
304           // Exclude eh_sjlj_setjmp; we don't need to exclude eh_sjlj_longjmp
305           // because, although it does clobber the counter register, the
306           // control can't then return to inside the loop unless there is also
307           // an eh_sjlj_setjmp.
308           case Intrinsic::eh_sjlj_setjmp:
309 
310           case Intrinsic::memcpy:
311           case Intrinsic::memmove:
312           case Intrinsic::memset:
313           case Intrinsic::powi:
314           case Intrinsic::log:
315           case Intrinsic::log2:
316           case Intrinsic::log10:
317           case Intrinsic::exp:
318           case Intrinsic::exp2:
319           case Intrinsic::pow:
320           case Intrinsic::sin:
321           case Intrinsic::cos:
322             return true;
323           case Intrinsic::copysign:
324             if (CI->getArgOperand(0)->getType()->getScalarType()->
325                 isPPC_FP128Ty())
326               return true;
327             else
328               continue; // ISD::FCOPYSIGN is never a library call.
329           case Intrinsic::sqrt:               Opcode = ISD::FSQRT;      break;
330           case Intrinsic::floor:              Opcode = ISD::FFLOOR;     break;
331           case Intrinsic::ceil:               Opcode = ISD::FCEIL;      break;
332           case Intrinsic::trunc:              Opcode = ISD::FTRUNC;     break;
333           case Intrinsic::rint:               Opcode = ISD::FRINT;      break;
334           case Intrinsic::nearbyint:          Opcode = ISD::FNEARBYINT; break;
335           case Intrinsic::round:              Opcode = ISD::FROUND;     break;
336           case Intrinsic::minnum:             Opcode = ISD::FMINNUM;    break;
337           case Intrinsic::maxnum:             Opcode = ISD::FMAXNUM;    break;
338           case Intrinsic::umul_with_overflow: Opcode = ISD::UMULO;      break;
339           case Intrinsic::smul_with_overflow: Opcode = ISD::SMULO;      break;
340           }
341         }
342 
        // PowerPC does not use [US]DIVREM or other library calls for
        // operations on regular types that are not otherwise library calls
        // (i.e. soft-float or atomic operations). If adapting this for targets
        // that do, additional care is required here.
347 
348         LibFunc Func;
349         if (!F->hasLocalLinkage() && F->hasName() && LibInfo &&
350             LibInfo->getLibFunc(F->getName(), Func) &&
351             LibInfo->hasOptimizedCodeGen(Func)) {
352           // Non-read-only functions are never treated as intrinsics.
353           if (!CI->onlyReadsMemory())
354             return true;
355 
356           // Conversion happens only for FP calls.
357           if (!CI->getArgOperand(0)->getType()->isFloatingPointTy())
358             return true;
359 
360           switch (Func) {
361           default: return true;
362           case LibFunc_copysign:
363           case LibFunc_copysignf:
364             continue; // ISD::FCOPYSIGN is never a library call.
365           case LibFunc_copysignl:
366             return true;
367           case LibFunc_fabs:
368           case LibFunc_fabsf:
369           case LibFunc_fabsl:
370             continue; // ISD::FABS is never a library call.
371           case LibFunc_sqrt:
372           case LibFunc_sqrtf:
373           case LibFunc_sqrtl:
374             Opcode = ISD::FSQRT; break;
375           case LibFunc_floor:
376           case LibFunc_floorf:
377           case LibFunc_floorl:
378             Opcode = ISD::FFLOOR; break;
379           case LibFunc_nearbyint:
380           case LibFunc_nearbyintf:
381           case LibFunc_nearbyintl:
382             Opcode = ISD::FNEARBYINT; break;
383           case LibFunc_ceil:
384           case LibFunc_ceilf:
385           case LibFunc_ceill:
386             Opcode = ISD::FCEIL; break;
387           case LibFunc_rint:
388           case LibFunc_rintf:
389           case LibFunc_rintl:
390             Opcode = ISD::FRINT; break;
391           case LibFunc_round:
392           case LibFunc_roundf:
393           case LibFunc_roundl:
394             Opcode = ISD::FROUND; break;
395           case LibFunc_trunc:
396           case LibFunc_truncf:
397           case LibFunc_truncl:
398             Opcode = ISD::FTRUNC; break;
399           case LibFunc_fmin:
400           case LibFunc_fminf:
401           case LibFunc_fminl:
402             Opcode = ISD::FMINNUM; break;
403           case LibFunc_fmax:
404           case LibFunc_fmaxf:
405           case LibFunc_fmaxl:
406             Opcode = ISD::FMAXNUM; break;
407           }
408         }
409 
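        // If the call maps to an ISD opcode, it stays inline only when the
        // operation is legal or custom for the argument type (or for its
        // scalar type, for vectors); otherwise assume it becomes a library
        // call.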
410         if (Opcode) {
411           EVT EVTy =
412               TLI->getValueType(DL, CI->getArgOperand(0)->getType(), true);
413 
414           if (EVTy == MVT::Other)
415             return true;
416 
417           if (TLI->isOperationLegalOrCustom(Opcode, EVTy))
418             continue;
419           else if (EVTy.isVector() &&
420                    TLI->isOperationLegalOrCustom(Opcode, EVTy.getScalarType()))
421             continue;
422 
423           return true;
424         }
425       }
426 
427       return true;
428     } else if (isa<BinaryOperator>(J) &&
429                J->getType()->getScalarType()->isPPC_FP128Ty()) {
      // Most operations on ppc_fp128 values become calls.
431       return true;
432     } else if (isa<UIToFPInst>(J) || isa<SIToFPInst>(J) ||
433                isa<FPToUIInst>(J) || isa<FPToSIInst>(J)) {
434       CastInst *CI = cast<CastInst>(J);
435       if (CI->getSrcTy()->getScalarType()->isPPC_FP128Ty() ||
436           CI->getDestTy()->getScalarType()->isPPC_FP128Ty() ||
437           isLargeIntegerTy(!TM.isPPC64(), CI->getSrcTy()->getScalarType()) ||
438           isLargeIntegerTy(!TM.isPPC64(), CI->getDestTy()->getScalarType()))
439         return true;
440     } else if (isLargeIntegerTy(!TM.isPPC64(),
441                                 J->getType()->getScalarType()) &&
442                (J->getOpcode() == Instruction::UDiv ||
443                 J->getOpcode() == Instruction::SDiv ||
444                 J->getOpcode() == Instruction::URem ||
445                 J->getOpcode() == Instruction::SRem)) {
446       return true;
447     } else if (!TM.isPPC64() &&
448                isLargeIntegerTy(false, J->getType()->getScalarType()) &&
449                (J->getOpcode() == Instruction::Shl ||
450                 J->getOpcode() == Instruction::AShr ||
451                 J->getOpcode() == Instruction::LShr)) {
452       // Only on PPC32, for 128-bit integers (specifically not 64-bit
453       // integers), these might be runtime calls.
454       return true;
455     } else if (isa<IndirectBrInst>(J) || isa<InvokeInst>(J)) {
456       // On PowerPC, indirect jumps use the counter register.
457       return true;
458     } else if (SwitchInst *SI = dyn_cast<SwitchInst>(J)) {
459       if (SI->getNumCases() + 1 >= (unsigned)TLI->getMinimumJumpTableEntries())
460         return true;
461     }
462 
463     // FREM is always a call.
464     if (J->getOpcode() == Instruction::FRem)
465       return true;
466 
467     if (ST->useSoftFloat()) {
468       switch(J->getOpcode()) {
469       case Instruction::FAdd:
470       case Instruction::FSub:
471       case Instruction::FMul:
472       case Instruction::FDiv:
473       case Instruction::FPTrunc:
474       case Instruction::FPExt:
475       case Instruction::FPToUI:
476       case Instruction::FPToSI:
477       case Instruction::UIToFP:
478       case Instruction::SIToFP:
479       case Instruction::FCmp:
480         return true;
481       }
482     }
483 
484     for (Value *Operand : J->operands())
485       if (memAddrUsesCTR(Operand))
486         return true;
487   }
488 
489   return false;
490 }
491 
492 bool PPCTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
493                                           AssumptionCache &AC,
494                                           TargetLibraryInfo *LibInfo,
495                                           HardwareLoopInfo &HWLoopInfo) {
496   const PPCTargetMachine &TM = ST->getTargetMachine();
497   TargetSchedModel SchedModel;
498   SchedModel.init(ST);
499 
500   // Do not convert small short loops to CTR loop.
501   unsigned ConstTripCount = SE.getSmallConstantTripCount(L);
502   if (ConstTripCount && ConstTripCount < SmallCTRLoopThreshold) {
503     SmallPtrSet<const Value *, 32> EphValues;
504     CodeMetrics::collectEphemeralValues(L, &AC, EphValues);
505     CodeMetrics Metrics;
506     for (BasicBlock *BB : L->blocks())
507       Metrics.analyzeBasicBlock(BB, *this, EphValues);
508     // 6 is an approximate latency for the mtctr instruction.
509     if (Metrics.NumInsts <= (6 * SchedModel.getIssueWidth()))
510       return false;
511   }
512 
513   // We don't want to spill/restore the counter register, and so we don't
514   // want to use the counter register if the loop contains calls.
515   for (Loop::block_iterator I = L->block_begin(), IE = L->block_end();
516        I != IE; ++I)
517     if (mightUseCTR(*I, LibInfo))
518       return false;
519 
520   SmallVector<BasicBlock*, 4> ExitingBlocks;
521   L->getExitingBlocks(ExitingBlocks);
522 
523   // If there is an exit edge known to be frequently taken,
524   // we should not transform this loop.
525   for (auto &BB : ExitingBlocks) {
526     Instruction *TI = BB->getTerminator();
527     if (!TI) continue;
528 
529     if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
530       uint64_t TrueWeight = 0, FalseWeight = 0;
531       if (!BI->isConditional() ||
532           !BI->extractProfMetadata(TrueWeight, FalseWeight))
533         continue;
534 
535       // If the exit path is more frequent than the loop path,
536       // we return here without further analysis for this loop.
537       bool TrueIsExit = !L->contains(BI->getSuccessor(0));
538       if (( TrueIsExit && FalseWeight < TrueWeight) ||
539           (!TrueIsExit && FalseWeight > TrueWeight))
540         return false;
541     }
542   }
543 
544   LLVMContext &C = L->getHeader()->getContext();
545   HWLoopInfo.CountType = TM.isPPC64() ?
546     Type::getInt64Ty(C) : Type::getInt32Ty(C);
547   HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1);
548   return true;
549 }
550 
551 void PPCTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
552                                          TTI::UnrollingPreferences &UP) {
553   if (ST->getDarwinDirective() == PPC::DIR_A2) {
554     // The A2 is in-order with a deep pipeline, and concatenation unrolling
555     // helps expose latency-hiding opportunities to the instruction scheduler.
556     UP.Partial = UP.Runtime = true;
557 
558     // We unroll a lot on the A2 (hundreds of instructions), and the benefits
559     // often outweigh the cost of a division to compute the trip count.
560     UP.AllowExpensiveTripCount = true;
561   }
562 
563   BaseT::getUnrollingPreferences(L, SE, UP);
564 }
565 
// This function returns true to allow using the coldcc calling convention.
// Returning true results in coldcc being used for functions that are cold at
// all call sites and whose callers do not call any other non-coldcc functions.
570 bool PPCTTIImpl::useColdCCForColdCall(Function &F) {
571   return EnablePPCColdCC;
572 }
573 
574 bool PPCTTIImpl::enableAggressiveInterleaving(bool LoopHasReductions) {
  // On the A2, always unroll aggressively. For QPX unaligned loads, we depend
  // on combining the loads generated for consecutive accesses, and failure to
  // do so is particularly expensive. Aggressive interleaving makes such
  // combining much more likely (compared to only using concatenation
  // unrolling).
579   if (ST->getDarwinDirective() == PPC::DIR_A2)
580     return true;
581 
582   return LoopHasReductions;
583 }
584 
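// Allow memcmp calls to be expanded inline using 8/4/2/1-byte loads, bounded
// by the target's maximum number of loads for the expansion.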
585 PPCTTIImpl::TTI::MemCmpExpansionOptions
586 PPCTTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
587   TTI::MemCmpExpansionOptions Options;
588   Options.LoadSizes = {8, 4, 2, 1};
589   Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
590   return Options;
591 }
592 
593 bool PPCTTIImpl::enableInterleavedAccessVectorization() {
594   return true;
595 }
596 
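// With VSX, the 64 VSX registers subsume both the FPRs and the Altivec VRs;
// otherwise each supported register class has 32 registers.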
597 unsigned PPCTTIImpl::getNumberOfRegisters(unsigned ClassID) const {
598   assert(ClassID == GPRRC || ClassID == FPRRC ||
599          ClassID == VRRC || ClassID == VSXRC);
600   if (ST->hasVSX()) {
601     assert(ClassID == GPRRC || ClassID == VSXRC);
602     return ClassID == GPRRC ? 32 : 64;
603   }
604   assert(ClassID == GPRRC || ClassID == FPRRC || ClassID == VRRC);
605   return 32;
606 }
607 
608 unsigned PPCTTIImpl::getRegisterClassForType(bool Vector, Type *Ty) const {
609   if (Vector)
610     return ST->hasVSX() ? VSXRC : VRRC;
611   else if (Ty && Ty->getScalarType()->isFloatTy())
612     return ST->hasVSX() ? VSXRC : FPRRC;
613   else
614     return GPRRC;
615 }
616 
617 const char* PPCTTIImpl::getRegisterClassName(unsigned ClassID) const {
618 
619   switch (ClassID) {
620     default:
621       llvm_unreachable("unknown register class");
622       return "PPC::unknown register class";
623     case GPRRC:       return "PPC::GPRRC";
624     case FPRRC:       return "PPC::FPRRC";
625     case VRRC:        return "PPC::VRRC";
626     case VSXRC:       return "PPC::VSXRC";
627   }
628 }
629 
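// QPX vectors are 256 bits wide and Altivec/VSX vectors are 128 bits; scalar
// registers are 64 bits on PPC64 and 32 bits otherwise.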
630 unsigned PPCTTIImpl::getRegisterBitWidth(bool Vector) const {
631   if (Vector) {
632     if (ST->hasQPX()) return 256;
633     if (ST->hasAltivec()) return 128;
634     return 0;
635   }
636 
637   if (ST->isPPC64())
638     return 64;
639   return 32;
640 
641 }
642 
643 unsigned PPCTTIImpl::getCacheLineSize() const {
644   // Check first if the user specified a custom line size.
645   if (CacheLineSize.getNumOccurrences() > 0)
646     return CacheLineSize;
647 
648   // On P7, P8 or P9 we have a cache line size of 128.
649   unsigned Directive = ST->getDarwinDirective();
650   if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
651       Directive == PPC::DIR_PWR9)
652     return 128;
653 
654   // On other processors return a default of 64 bytes.
655   return 64;
656 }
657 
658 unsigned PPCTTIImpl::getPrefetchDistance() const {
659   // This seems like a reasonable default for the BG/Q (this pass is enabled, by
660   // default, only on the BG/Q).
661   return 300;
662 }
663 
664 unsigned PPCTTIImpl::getMaxInterleaveFactor(unsigned VF) {
665   unsigned Directive = ST->getDarwinDirective();
666   // The 440 has no SIMD support, but floating-point instructions
667   // have a 5-cycle latency, so unroll by 5x for latency hiding.
668   if (Directive == PPC::DIR_440)
669     return 5;
670 
671   // The A2 has no SIMD support, but floating-point instructions
672   // have a 6-cycle latency, so unroll by 6x for latency hiding.
673   if (Directive == PPC::DIR_A2)
674     return 6;
675 
676   // FIXME: For lack of any better information, do no harm...
677   if (Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500)
678     return 1;
679 
680   // For P7 and P8, floating-point instructions have a 6-cycle latency and
681   // there are two execution units, so unroll by 12x for latency hiding.
682   // FIXME: the same for P9 as previous gen until POWER9 scheduling is ready
683   if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
684       Directive == PPC::DIR_PWR9)
685     return 12;
686 
687   // For most things, modern systems have two execution units (and
688   // out-of-order execution).
689   return 2;
690 }
691 
// Adjust the cost of vector instructions on targets in which there is overlap
// between the vector and scalar units, which reduces the overall throughput of
// vector code relative to scalar code.
695 int PPCTTIImpl::vectorCostAdjustment(int Cost, unsigned Opcode, Type *Ty1,
696                                      Type *Ty2) {
697   if (!ST->vectorsUseTwoUnits() || !Ty1->isVectorTy())
698     return Cost;
699 
700   std::pair<int, MVT> LT1 = TLI->getTypeLegalizationCost(DL, Ty1);
701   // If type legalization involves splitting the vector, we don't want to
702   // double the cost at every step - only the last step.
703   if (LT1.first != 1 || !LT1.second.isVector())
704     return Cost;
705 
706   int ISD = TLI->InstructionOpcodeToISD(Opcode);
707   if (TLI->isOperationExpand(ISD, LT1.second))
708     return Cost;
709 
710   if (Ty2) {
711     std::pair<int, MVT> LT2 = TLI->getTypeLegalizationCost(DL, Ty2);
712     if (LT2.first != 1 || !LT2.second.isVector())
713       return Cost;
714   }
715 
716   return Cost * 2;
717 }
718 
719 int PPCTTIImpl::getArithmeticInstrCost(
720     unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
721     TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
722     TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args) {
723   assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");
724 
725   // Fallback to the default implementation.
726   int Cost = BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info,
727                                            Opd1PropInfo, Opd2PropInfo);
728   return vectorCostAdjustment(Cost, Opcode, Ty, nullptr);
729 }
730 
731 int PPCTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
732                                Type *SubTp) {
733   // Legalize the type.
734   std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
735 
  // PPC, for both Altivec/VSX and QPX, supports cheap arbitrary permutations
737   // (at least in the sense that there need only be one non-loop-invariant
738   // instruction). We need one such shuffle instruction for each actual
739   // register (this is not true for arbitrary shuffles, but is true for the
740   // structured types of shuffles covered by TTI::ShuffleKind).
741   return vectorCostAdjustment(LT.first, Instruction::ShuffleVector, Tp,
742                               nullptr);
743 }
744 
745 int PPCTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
746                                  const Instruction *I) {
747   assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");
748 
749   int Cost = BaseT::getCastInstrCost(Opcode, Dst, Src);
750   return vectorCostAdjustment(Cost, Opcode, Dst, Src);
751 }
752 
753 int PPCTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
754                                    const Instruction *I) {
755   int Cost = BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
756   return vectorCostAdjustment(Cost, Opcode, ValTy, nullptr);
757 }
758 
759 int PPCTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
760   assert(Val->isVectorTy() && "This must be a vector type");
761 
762   int ISD = TLI->InstructionOpcodeToISD(Opcode);
763   assert(ISD && "Invalid opcode");
764 
765   int Cost = BaseT::getVectorInstrCost(Opcode, Val, Index);
766   Cost = vectorCostAdjustment(Cost, Opcode, Val, nullptr);
767 
768   if (ST->hasVSX() && Val->getScalarType()->isDoubleTy()) {
769     // Double-precision scalars are already located in index #0 (or #1 if LE).
770     if (ISD == ISD::EXTRACT_VECTOR_ELT &&
771         Index == (ST->isLittleEndian() ? 1 : 0))
772       return 0;
773 
774     return Cost;
775 
776   } else if (ST->hasQPX() && Val->getScalarType()->isFloatingPointTy()) {
777     // Floating point scalars are already located in index #0.
778     if (Index == 0)
779       return 0;
780 
781     return Cost;
782 
783   } else if (Val->getScalarType()->isIntegerTy() && Index != -1U) {
784     if (ST->hasP9Altivec()) {
785       if (ISD == ISD::INSERT_VECTOR_ELT)
786         // A move-to VSR and a permute/insert.  Assume vector operation cost
787         // for both (cost will be 2x on P9).
788         return vectorCostAdjustment(2, Opcode, Val, nullptr);
789 
790       // It's an extract.  Maybe we can do a cheap move-from VSR.
791       unsigned EltSize = Val->getScalarSizeInBits();
792       if (EltSize == 64) {
793         unsigned MfvsrdIndex = ST->isLittleEndian() ? 1 : 0;
794         if (Index == MfvsrdIndex)
795           return 1;
796       } else if (EltSize == 32) {
797         unsigned MfvsrwzIndex = ST->isLittleEndian() ? 2 : 1;
798         if (Index == MfvsrwzIndex)
799           return 1;
800       }
801 
802       // We need a vector extract (or mfvsrld).  Assume vector operation cost.
803       // The cost of the load constant for a vector extract is disregarded
804       // (invariant, easily schedulable).
805       return vectorCostAdjustment(1, Opcode, Val, nullptr);
806 
807     } else if (ST->hasDirectMove())
808       // Assume permute has standard cost.
809       // Assume move-to/move-from VSR have 2x standard cost.
810       return 3;
811   }
812 
813   // Estimated cost of a load-hit-store delay.  This was obtained
814   // experimentally as a minimum needed to prevent unprofitable
815   // vectorization for the paq8p benchmark.  It may need to be
816   // raised further if other unprofitable cases remain.
817   unsigned LHSPenalty = 2;
818   if (ISD == ISD::INSERT_VECTOR_ELT)
819     LHSPenalty += 7;
820 
821   // Vector element insert/extract with Altivec is very expensive,
822   // because they require store and reload with the attendant
823   // processor stall for load-hit-store.  Until VSX is available,
824   // these need to be estimated as very costly.
825   if (ISD == ISD::EXTRACT_VECTOR_ELT ||
826       ISD == ISD::INSERT_VECTOR_ELT)
827     return LHSPenalty + Cost;
828 
829   return Cost;
830 }
831 
832 int PPCTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
833                                 unsigned AddressSpace, const Instruction *I) {
834   // Legalize the type.
835   std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
836   assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
837          "Invalid Opcode");
838 
839   int Cost = BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
840   Cost = vectorCostAdjustment(Cost, Opcode, Src, nullptr);
841 
842   bool IsAltivecType = ST->hasAltivec() &&
843                        (LT.second == MVT::v16i8 || LT.second == MVT::v8i16 ||
844                         LT.second == MVT::v4i32 || LT.second == MVT::v4f32);
845   bool IsVSXType = ST->hasVSX() &&
846                    (LT.second == MVT::v2f64 || LT.second == MVT::v2i64);
847   bool IsQPXType = ST->hasQPX() &&
848                    (LT.second == MVT::v4f64 || LT.second == MVT::v4f32);
849 
850   // VSX has 32b/64b load instructions. Legalization can handle loading of
851   // 32b/64b to VSR correctly and cheaply. But BaseT::getMemoryOpCost and
852   // PPCTargetLowering can't compute the cost appropriately. So here we
853   // explicitly check this case.
  unsigned MemBits = Src->getPrimitiveSizeInBits();
  if (Opcode == Instruction::Load && ST->hasVSX() && IsAltivecType &&
      (MemBits == 64 || (ST->hasP8Vector() && MemBits == 32)))
857     return 1;
858 
859   // Aligned loads and stores are easy.
860   unsigned SrcBytes = LT.second.getStoreSize();
861   if (!SrcBytes || !Alignment || Alignment >= SrcBytes)
862     return Cost;
863 
864   // If we can use the permutation-based load sequence, then this is also
865   // relatively cheap (not counting loop-invariant instructions): one load plus
866   // one permute (the last load in a series has extra cost, but we're
867   // neglecting that here). Note that on the P7, we could do unaligned loads
868   // for Altivec types using the VSX instructions, but that's more expensive
869   // than using the permutation-based load sequence. On the P8, that's no
870   // longer true.
871   if (Opcode == Instruction::Load &&
872       ((!ST->hasP8Vector() && IsAltivecType) || IsQPXType) &&
873       Alignment >= LT.second.getScalarType().getStoreSize())
874     return Cost + LT.first; // Add the cost of the permutations.
875 
876   // For VSX, we can do unaligned loads and stores on Altivec/VSX types. On the
877   // P7, unaligned vector loads are more expensive than the permutation-based
878   // load sequence, so that might be used instead, but regardless, the net cost
879   // is about the same (not counting loop-invariant instructions).
880   if (IsVSXType || (ST->hasVSX() && IsAltivecType))
881     return Cost;
882 
883   // Newer PPC supports unaligned memory access.
884   if (TLI->allowsMisalignedMemoryAccesses(LT.second, 0))
885     return Cost;
886 
887   // PPC in general does not support unaligned loads and stores. They'll need
888   // to be decomposed based on the alignment factor.
889 
890   // Add the cost of each scalar load or store.
891   Cost += LT.first*(SrcBytes/Alignment-1);
892 
893   // For a vector type, there is also scalarization overhead (only for
894   // stores, loads are expanded using the vector-load + permutation sequence,
895   // which is much less expensive).
896   if (Src->isVectorTy() && Opcode == Instruction::Store)
897     for (int i = 0, e = Src->getVectorNumElements(); i < e; ++i)
898       Cost += getVectorInstrCost(Instruction::ExtractElement, Src, i);
899 
900   return Cost;
901 }
902 
903 int PPCTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
904                                            unsigned Factor,
905                                            ArrayRef<unsigned> Indices,
906                                            unsigned Alignment,
907                                            unsigned AddressSpace,
908                                            bool UseMaskForCond,
909                                            bool UseMaskForGaps) {
910   if (UseMaskForCond || UseMaskForGaps)
911     return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
912                                              Alignment, AddressSpace,
913                                              UseMaskForCond, UseMaskForGaps);
914 
915   assert(isa<VectorType>(VecTy) &&
916          "Expect a vector type for interleaved memory op");
917 
918   // Legalize the type.
919   std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, VecTy);
920 
  // First, the cost of the load/store operation itself.
922   int Cost = getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace);
923 
  // PPC, for both Altivec/VSX and QPX, supports cheap arbitrary permutations
925   // (at least in the sense that there need only be one non-loop-invariant
926   // instruction). For each result vector, we need one shuffle per incoming
927   // vector (except that the first shuffle can take two incoming vectors
928   // because it does not need to take itself).
929   Cost += Factor*(LT.first-1);
930 
931   return Cost;
932 }
933 
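// Returns true if this loop is expected to become a hardware (CTR) loop, so
// the comparison feeding its exit branch can be saved; *BI is set to that
// exit branch. Nested loops are checked first, and finding a candidate there
// stops the search.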
934 bool PPCTTIImpl::canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE,
935                             LoopInfo *LI, DominatorTree *DT,
936                             AssumptionCache *AC, TargetLibraryInfo *LibInfo) {
937   // Process nested loops first.
938   for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
939     if (canSaveCmp(*I, BI, SE, LI, DT, AC, LibInfo))
940       return false; // Stop search.
941 
942   HardwareLoopInfo HWLoopInfo(L);
943 
944   if (!HWLoopInfo.canAnalyze(*LI))
945     return false;
946 
947   if (!isHardwareLoopProfitable(L, *SE, *AC, LibInfo, HWLoopInfo))
948     return false;
949 
950   if (!HWLoopInfo.isHardwareLoopCandidate(*SE, *LI, *DT))
951     return false;
952 
953   *BI = HWLoopInfo.ExitBranch;
954   return true;
955 }
956