1 //===- llvm/Analysis/TargetTransformInfo.cpp ------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "llvm/Analysis/TargetTransformInfo.h"
10 #include "llvm/Analysis/CFG.h"
11 #include "llvm/Analysis/LoopIterator.h"
12 #include "llvm/Analysis/TargetTransformInfoImpl.h"
13 #include "llvm/IR/CFG.h"
14 #include "llvm/IR/DataLayout.h"
15 #include "llvm/IR/Dominators.h"
16 #include "llvm/IR/Instruction.h"
17 #include "llvm/IR/Instructions.h"
18 #include "llvm/IR/IntrinsicInst.h"
19 #include "llvm/IR/Module.h"
20 #include "llvm/IR/Operator.h"
21 #include "llvm/IR/PatternMatch.h"
22 #include "llvm/InitializePasses.h"
23 #include "llvm/Support/CommandLine.h"
24 #include "llvm/Support/ErrorHandling.h"
25 #include <utility>
26 
27 using namespace llvm;
28 using namespace PatternMatch;
29 
30 #define DEBUG_TYPE "tti"
31 
32 static cl::opt<bool> EnableReduxCost("costmodel-reduxcost", cl::init(false),
33                                      cl::Hidden,
34                                      cl::desc("Recognize reduction patterns."));
35 
36 namespace {
37 /// No-op implementation of the TTI interface using the utility base
38 /// classes.
39 ///
40 /// This is used when no target specific information is available.
41 struct NoTTIImpl : TargetTransformInfoImplCRTPBase<NoTTIImpl> {
42   explicit NoTTIImpl(const DataLayout &DL)
43       : TargetTransformInfoImplCRTPBase<NoTTIImpl>(DL) {}
44 };
45 } // namespace
46 
47 bool HardwareLoopInfo::canAnalyze(LoopInfo &LI) {
48   // If the loop has irreducible control flow, it cannot be converted to a
49   // hardware loop.
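  // A hedged illustrative sketch (hypothetical labels): a cycle with two entry
  // points has no single header and is therefore irreducible, e.g.
  //   entry: br i1 %c, label %a, label %b
  //   a:     br label %b
  //   b:     br label %a
  // Neither %a nor %b dominates the other, so such loops are rejected here.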
50   LoopBlocksRPO RPOT(L);
51   RPOT.perform(&LI);
52   if (containsIrreducibleCFG<const BasicBlock *>(RPOT, LI))
53     return false;
54   return true;
55 }
56 
57 IntrinsicCostAttributes::IntrinsicCostAttributes(const IntrinsicInst &I) :
58     II(&I), RetTy(I.getType()), IID(I.getIntrinsicID()) {
59 
60   FunctionType *FTy = I.getCalledFunction()->getFunctionType();
61   ParamTys.insert(ParamTys.begin(), FTy->param_begin(), FTy->param_end());
62   Arguments.insert(Arguments.begin(), I.arg_begin(), I.arg_end());
63   if (auto *FPMO = dyn_cast<FPMathOperator>(&I))
64     FMF = FPMO->getFastMathFlags();
65 }
66 
67 IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id,
68                                                  const CallBase &CI) :
69   II(dyn_cast<IntrinsicInst>(&CI)),  RetTy(CI.getType()), IID(Id) {
70 
71   if (const auto *FPMO = dyn_cast<FPMathOperator>(&CI))
72     FMF = FPMO->getFastMathFlags();
73 
74   Arguments.insert(Arguments.begin(), CI.arg_begin(), CI.arg_end());
75   FunctionType *FTy =
76     CI.getCalledFunction()->getFunctionType();
77   ParamTys.insert(ParamTys.begin(), FTy->param_begin(), FTy->param_end());
78 }
79 
80 IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id,
81                                                  const CallBase &CI,
82                                                  unsigned Factor) :
83     RetTy(CI.getType()), IID(Id), VF(Factor) {
84 
85   if (auto *FPMO = dyn_cast<FPMathOperator>(&CI))
86     FMF = FPMO->getFastMathFlags();
87 
88   Arguments.insert(Arguments.begin(), CI.arg_begin(), CI.arg_end());
89   FunctionType *FTy =
90     CI.getCalledFunction()->getFunctionType();
91   ParamTys.insert(ParamTys.begin(), FTy->param_begin(), FTy->param_end());
92 }
93 
94 IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id,
95                                                  const CallBase &CI,
96                                                  unsigned Factor,
97                                                  unsigned ScalarCost) :
98     RetTy(CI.getType()), IID(Id), VF(Factor), ScalarizationCost(ScalarCost) {
99 
100   if (const auto *FPMO = dyn_cast<FPMathOperator>(&CI))
101     FMF = FPMO->getFastMathFlags();
102 
103   Arguments.insert(Arguments.begin(), CI.arg_begin(), CI.arg_end());
104   FunctionType *FTy =
105     CI.getCalledFunction()->getFunctionType();
106   ParamTys.insert(ParamTys.begin(), FTy->param_begin(), FTy->param_end());
107 }
108 
109 IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
110                                                  ArrayRef<Type *> Tys,
111                                                  FastMathFlags Flags) :
112     RetTy(RTy), IID(Id), FMF(Flags) {
113   ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());
114 }
115 
116 IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
117                                                  ArrayRef<Type *> Tys,
118                                                  FastMathFlags Flags,
119                                                  unsigned ScalarCost) :
120     RetTy(RTy), IID(Id), FMF(Flags), ScalarizationCost(ScalarCost) {
121   ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());
122 }
123 
124 IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
125                                                  ArrayRef<Type *> Tys,
126                                                  FastMathFlags Flags,
127                                                  unsigned ScalarCost,
128                                                  const IntrinsicInst *I) :
129     II(I), RetTy(RTy), IID(Id), FMF(Flags), ScalarizationCost(ScalarCost) {
130   ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());
131 }
132 
133 IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
134                                                  ArrayRef<Type *> Tys) :
135     RetTy(RTy), IID(Id) {
136   ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());
137 }
138 
139 IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *Ty,
140                                                  ArrayRef<const Value *> Args)
141     : RetTy(Ty), IID(Id) {
142 
143   Arguments.insert(Arguments.begin(), Args.begin(), Args.end());
144   ParamTys.reserve(Arguments.size());
145   for (unsigned Idx = 0, Size = Arguments.size(); Idx != Size; ++Idx)
146     ParamTys.push_back(Arguments[Idx]->getType());
147 }
148 
149 bool HardwareLoopInfo::isHardwareLoopCandidate(ScalarEvolution &SE,
150                                                LoopInfo &LI, DominatorTree &DT,
151                                                bool ForceNestedLoop,
152                                                bool ForceHardwareLoopPHI) {
153   SmallVector<BasicBlock *, 4> ExitingBlocks;
154   L->getExitingBlocks(ExitingBlocks);
155 
156   for (BasicBlock *BB : ExitingBlocks) {
157     // If we pass the updated counter back through a phi, we need to know
158     // which latch the updated value will be coming from.
159     if (!L->isLoopLatch(BB)) {
160       if (ForceHardwareLoopPHI || CounterInReg)
161         continue;
162     }
163 
164     const SCEV *EC = SE.getExitCount(L, BB);
165     if (isa<SCEVCouldNotCompute>(EC))
166       continue;
167     if (const SCEVConstant *ConstEC = dyn_cast<SCEVConstant>(EC)) {
168       if (ConstEC->getValue()->isZero())
169         continue;
170     } else if (!SE.isLoopInvariant(EC, L))
171       continue;
172 
173     if (SE.getTypeSizeInBits(EC->getType()) > CountType->getBitWidth())
174       continue;
175 
176     // If this exiting block is contained in a nested loop, it is not eligible
177     // for insertion of the branch-and-decrement since the inner loop would
178     // end up clobbering the value in the loop counter register.
179     if (!IsNestingLegal && LI.getLoopFor(BB) != L && !ForceNestedLoop)
180       continue;
181 
182     // We now have a loop-invariant count of loop iterations (which is not the
183     // constant zero) for which we know that this loop will not exit via this
184     // exiting block.
185 
186     // We need to make sure that this block will run on every loop iteration.
187     // For this to be true, it must dominate all blocks with backedges. Such
188     // blocks are in-loop predecessors of the header block.
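    // Hedged illustration (hypothetical block names): if the header's in-loop
    // predecessors are %latch and %backedge2, BB is only acceptable when it
    // dominates both; otherwise some iterations could bypass BB, and its exit
    // count would not describe every iteration.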
189     bool NotAlways = false;
190     for (BasicBlock *Pred : predecessors(L->getHeader())) {
191       if (!L->contains(Pred))
192         continue;
193 
194       if (!DT.dominates(BB, Pred)) {
195         NotAlways = true;
196         break;
197       }
198     }
199 
200     if (NotAlways)
201       continue;
202 
203     // Make sure this block ends with a conditional branch.
204     Instruction *TI = BB->getTerminator();
205     if (!TI)
206       continue;
207 
208     if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
209       if (!BI->isConditional())
210         continue;
211 
212       ExitBranch = BI;
213     } else
214       continue;
215 
216     // Note that this block may not be the loop latch block, even if the loop
217     // has a latch block.
218     ExitBlock = BB;
219     ExitCount = EC;
220     break;
221   }
222 
223   if (!ExitBlock)
224     return false;
225   return true;
226 }
227 
228 TargetTransformInfo::TargetTransformInfo(const DataLayout &DL)
229     : TTIImpl(new Model<NoTTIImpl>(NoTTIImpl(DL))) {}
230 
231 TargetTransformInfo::~TargetTransformInfo() {}
232 
233 TargetTransformInfo::TargetTransformInfo(TargetTransformInfo &&Arg)
234     : TTIImpl(std::move(Arg.TTIImpl)) {}
235 
236 TargetTransformInfo &TargetTransformInfo::operator=(TargetTransformInfo &&RHS) {
237   TTIImpl = std::move(RHS.TTIImpl);
238   return *this;
239 }
240 
241 unsigned TargetTransformInfo::getInliningThresholdMultiplier() const {
242   return TTIImpl->getInliningThresholdMultiplier();
243 }
244 
245 int TargetTransformInfo::getInlinerVectorBonusPercent() const {
246   return TTIImpl->getInlinerVectorBonusPercent();
247 }
248 
249 int TargetTransformInfo::getGEPCost(Type *PointeeType, const Value *Ptr,
250                                     ArrayRef<const Value *> Operands,
251                                     TTI::TargetCostKind CostKind) const {
252   return TTIImpl->getGEPCost(PointeeType, Ptr, Operands, CostKind);
253 }
254 
255 unsigned TargetTransformInfo::getEstimatedNumberOfCaseClusters(
256     const SwitchInst &SI, unsigned &JTSize, ProfileSummaryInfo *PSI,
257     BlockFrequencyInfo *BFI) const {
258   return TTIImpl->getEstimatedNumberOfCaseClusters(SI, JTSize, PSI, BFI);
259 }
260 
261 int TargetTransformInfo::getUserCost(const User *U,
262                                      ArrayRef<const Value *> Operands,
263                                      enum TargetCostKind CostKind) const {
264   int Cost = TTIImpl->getUserCost(U, Operands, CostKind);
265   assert((CostKind == TTI::TCK_RecipThroughput || Cost >= 0) &&
266          "TTI should not produce negative costs!");
267   return Cost;
268 }
269 
270 bool TargetTransformInfo::hasBranchDivergence() const {
271   return TTIImpl->hasBranchDivergence();
272 }
273 
274 bool TargetTransformInfo::useGPUDivergenceAnalysis() const {
275   return TTIImpl->useGPUDivergenceAnalysis();
276 }
277 
278 bool TargetTransformInfo::isSourceOfDivergence(const Value *V) const {
279   return TTIImpl->isSourceOfDivergence(V);
280 }
281 
282 bool llvm::TargetTransformInfo::isAlwaysUniform(const Value *V) const {
283   return TTIImpl->isAlwaysUniform(V);
284 }
285 
286 unsigned TargetTransformInfo::getFlatAddressSpace() const {
287   return TTIImpl->getFlatAddressSpace();
288 }
289 
290 bool TargetTransformInfo::collectFlatAddressOperands(
291     SmallVectorImpl<int> &OpIndexes, Intrinsic::ID IID) const {
292   return TTIImpl->collectFlatAddressOperands(OpIndexes, IID);
293 }
294 
295 bool TargetTransformInfo::isNoopAddrSpaceCast(unsigned FromAS,
296                                               unsigned ToAS) const {
297   return TTIImpl->isNoopAddrSpaceCast(FromAS, ToAS);
298 }
299 
300 unsigned TargetTransformInfo::getAssumedAddrSpace(const Value *V) const {
301   return TTIImpl->getAssumedAddrSpace(V);
302 }
303 
304 Value *TargetTransformInfo::rewriteIntrinsicWithAddressSpace(
305     IntrinsicInst *II, Value *OldV, Value *NewV) const {
306   return TTIImpl->rewriteIntrinsicWithAddressSpace(II, OldV, NewV);
307 }
308 
309 bool TargetTransformInfo::isLoweredToCall(const Function *F) const {
310   return TTIImpl->isLoweredToCall(F);
311 }
312 
313 bool TargetTransformInfo::isHardwareLoopProfitable(
314     Loop *L, ScalarEvolution &SE, AssumptionCache &AC,
315     TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) const {
316   return TTIImpl->isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);
317 }
318 
319 bool TargetTransformInfo::preferPredicateOverEpilogue(
320     Loop *L, LoopInfo *LI, ScalarEvolution &SE, AssumptionCache &AC,
321     TargetLibraryInfo *TLI, DominatorTree *DT,
322     const LoopAccessInfo *LAI) const {
323   return TTIImpl->preferPredicateOverEpilogue(L, LI, SE, AC, TLI, DT, LAI);
324 }
325 
326 bool TargetTransformInfo::emitGetActiveLaneMask() const {
327   return TTIImpl->emitGetActiveLaneMask();
328 }
329 
330 Optional<Instruction *>
331 TargetTransformInfo::instCombineIntrinsic(InstCombiner &IC,
332                                           IntrinsicInst &II) const {
333   return TTIImpl->instCombineIntrinsic(IC, II);
334 }
335 
336 Optional<Value *> TargetTransformInfo::simplifyDemandedUseBitsIntrinsic(
337     InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
338     bool &KnownBitsComputed) const {
339   return TTIImpl->simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
340                                                    KnownBitsComputed);
341 }
342 
343 Optional<Value *> TargetTransformInfo::simplifyDemandedVectorEltsIntrinsic(
344     InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
345     APInt &UndefElts2, APInt &UndefElts3,
346     std::function<void(Instruction *, unsigned, APInt, APInt &)>
347         SimplifyAndSetOp) const {
348   return TTIImpl->simplifyDemandedVectorEltsIntrinsic(
349       IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
350       SimplifyAndSetOp);
351 }
352 
353 void TargetTransformInfo::getUnrollingPreferences(
354     Loop *L, ScalarEvolution &SE, UnrollingPreferences &UP) const {
355   return TTIImpl->getUnrollingPreferences(L, SE, UP);
356 }
357 
358 void TargetTransformInfo::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
359                                                 PeelingPreferences &PP) const {
360   return TTIImpl->getPeelingPreferences(L, SE, PP);
361 }
362 
363 bool TargetTransformInfo::isLegalAddImmediate(int64_t Imm) const {
364   return TTIImpl->isLegalAddImmediate(Imm);
365 }
366 
367 bool TargetTransformInfo::isLegalICmpImmediate(int64_t Imm) const {
368   return TTIImpl->isLegalICmpImmediate(Imm);
369 }
370 
371 bool TargetTransformInfo::isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
372                                                 int64_t BaseOffset,
373                                                 bool HasBaseReg, int64_t Scale,
374                                                 unsigned AddrSpace,
375                                                 Instruction *I) const {
376   return TTIImpl->isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
377                                         Scale, AddrSpace, I);
378 }
379 
380 bool TargetTransformInfo::isLSRCostLess(LSRCost &C1, LSRCost &C2) const {
381   return TTIImpl->isLSRCostLess(C1, C2);
382 }
383 
384 bool TargetTransformInfo::isNumRegsMajorCostOfLSR() const {
385   return TTIImpl->isNumRegsMajorCostOfLSR();
386 }
387 
388 bool TargetTransformInfo::isProfitableLSRChainElement(Instruction *I) const {
389   return TTIImpl->isProfitableLSRChainElement(I);
390 }
391 
392 bool TargetTransformInfo::canMacroFuseCmp() const {
393   return TTIImpl->canMacroFuseCmp();
394 }
395 
396 bool TargetTransformInfo::canSaveCmp(Loop *L, BranchInst **BI,
397                                      ScalarEvolution *SE, LoopInfo *LI,
398                                      DominatorTree *DT, AssumptionCache *AC,
399                                      TargetLibraryInfo *LibInfo) const {
400   return TTIImpl->canSaveCmp(L, BI, SE, LI, DT, AC, LibInfo);
401 }
402 
403 bool TargetTransformInfo::shouldFavorPostInc() const {
404   return TTIImpl->shouldFavorPostInc();
405 }
406 
407 bool TargetTransformInfo::shouldFavorBackedgeIndex(const Loop *L) const {
408   return TTIImpl->shouldFavorBackedgeIndex(L);
409 }
410 
411 bool TargetTransformInfo::isLegalMaskedStore(Type *DataType,
412                                              Align Alignment) const {
413   return TTIImpl->isLegalMaskedStore(DataType, Alignment);
414 }
415 
416 bool TargetTransformInfo::isLegalMaskedLoad(Type *DataType,
417                                             Align Alignment) const {
418   return TTIImpl->isLegalMaskedLoad(DataType, Alignment);
419 }
420 
421 bool TargetTransformInfo::isLegalNTStore(Type *DataType,
422                                          Align Alignment) const {
423   return TTIImpl->isLegalNTStore(DataType, Alignment);
424 }
425 
426 bool TargetTransformInfo::isLegalNTLoad(Type *DataType, Align Alignment) const {
427   return TTIImpl->isLegalNTLoad(DataType, Alignment);
428 }
429 
430 bool TargetTransformInfo::isLegalMaskedGather(Type *DataType,
431                                               Align Alignment) const {
432   return TTIImpl->isLegalMaskedGather(DataType, Alignment);
433 }
434 
435 bool TargetTransformInfo::isLegalMaskedScatter(Type *DataType,
436                                                Align Alignment) const {
437   return TTIImpl->isLegalMaskedScatter(DataType, Alignment);
438 }
439 
440 bool TargetTransformInfo::isLegalMaskedCompressStore(Type *DataType) const {
441   return TTIImpl->isLegalMaskedCompressStore(DataType);
442 }
443 
444 bool TargetTransformInfo::isLegalMaskedExpandLoad(Type *DataType) const {
445   return TTIImpl->isLegalMaskedExpandLoad(DataType);
446 }
447 
448 bool TargetTransformInfo::hasDivRemOp(Type *DataType, bool IsSigned) const {
449   return TTIImpl->hasDivRemOp(DataType, IsSigned);
450 }
451 
452 bool TargetTransformInfo::hasVolatileVariant(Instruction *I,
453                                              unsigned AddrSpace) const {
454   return TTIImpl->hasVolatileVariant(I, AddrSpace);
455 }
456 
457 bool TargetTransformInfo::prefersVectorizedAddressing() const {
458   return TTIImpl->prefersVectorizedAddressing();
459 }
460 
461 int TargetTransformInfo::getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
462                                               int64_t BaseOffset,
463                                               bool HasBaseReg, int64_t Scale,
464                                               unsigned AddrSpace) const {
465   int Cost = TTIImpl->getScalingFactorCost(Ty, BaseGV, BaseOffset, HasBaseReg,
466                                            Scale, AddrSpace);
467   assert(Cost >= 0 && "TTI should not produce negative costs!");
468   return Cost;
469 }
470 
471 bool TargetTransformInfo::LSRWithInstrQueries() const {
472   return TTIImpl->LSRWithInstrQueries();
473 }
474 
475 bool TargetTransformInfo::isTruncateFree(Type *Ty1, Type *Ty2) const {
476   return TTIImpl->isTruncateFree(Ty1, Ty2);
477 }
478 
479 bool TargetTransformInfo::isProfitableToHoist(Instruction *I) const {
480   return TTIImpl->isProfitableToHoist(I);
481 }
482 
483 bool TargetTransformInfo::useAA() const { return TTIImpl->useAA(); }
484 
485 bool TargetTransformInfo::isTypeLegal(Type *Ty) const {
486   return TTIImpl->isTypeLegal(Ty);
487 }
488 
489 bool TargetTransformInfo::shouldBuildLookupTables() const {
490   return TTIImpl->shouldBuildLookupTables();
491 }
492 bool TargetTransformInfo::shouldBuildLookupTablesForConstant(
493     Constant *C) const {
494   return TTIImpl->shouldBuildLookupTablesForConstant(C);
495 }
496 
497 bool TargetTransformInfo::useColdCCForColdCall(Function &F) const {
498   return TTIImpl->useColdCCForColdCall(F);
499 }
500 
501 unsigned
502 TargetTransformInfo::getScalarizationOverhead(VectorType *Ty,
503                                               const APInt &DemandedElts,
504                                               bool Insert, bool Extract) const {
505   return TTIImpl->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract);
506 }
507 
508 unsigned TargetTransformInfo::getOperandsScalarizationOverhead(
509     ArrayRef<const Value *> Args, unsigned VF) const {
510   return TTIImpl->getOperandsScalarizationOverhead(Args, VF);
511 }
512 
513 bool TargetTransformInfo::supportsEfficientVectorElementLoadStore() const {
514   return TTIImpl->supportsEfficientVectorElementLoadStore();
515 }
516 
517 bool TargetTransformInfo::enableAggressiveInterleaving(
518     bool LoopHasReductions) const {
519   return TTIImpl->enableAggressiveInterleaving(LoopHasReductions);
520 }
521 
522 TargetTransformInfo::MemCmpExpansionOptions
523 TargetTransformInfo::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
524   return TTIImpl->enableMemCmpExpansion(OptSize, IsZeroCmp);
525 }
526 
527 bool TargetTransformInfo::enableInterleavedAccessVectorization() const {
528   return TTIImpl->enableInterleavedAccessVectorization();
529 }
530 
531 bool TargetTransformInfo::enableMaskedInterleavedAccessVectorization() const {
532   return TTIImpl->enableMaskedInterleavedAccessVectorization();
533 }
534 
535 bool TargetTransformInfo::isFPVectorizationPotentiallyUnsafe() const {
536   return TTIImpl->isFPVectorizationPotentiallyUnsafe();
537 }
538 
539 bool TargetTransformInfo::allowsMisalignedMemoryAccesses(LLVMContext &Context,
540                                                          unsigned BitWidth,
541                                                          unsigned AddressSpace,
542                                                          unsigned Alignment,
543                                                          bool *Fast) const {
544   return TTIImpl->allowsMisalignedMemoryAccesses(Context, BitWidth,
545                                                  AddressSpace, Alignment, Fast);
546 }
547 
548 TargetTransformInfo::PopcntSupportKind
549 TargetTransformInfo::getPopcntSupport(unsigned IntTyWidthInBit) const {
550   return TTIImpl->getPopcntSupport(IntTyWidthInBit);
551 }
552 
553 bool TargetTransformInfo::haveFastSqrt(Type *Ty) const {
554   return TTIImpl->haveFastSqrt(Ty);
555 }
556 
557 bool TargetTransformInfo::isFCmpOrdCheaperThanFCmpZero(Type *Ty) const {
558   return TTIImpl->isFCmpOrdCheaperThanFCmpZero(Ty);
559 }
560 
561 int TargetTransformInfo::getFPOpCost(Type *Ty) const {
562   int Cost = TTIImpl->getFPOpCost(Ty);
563   assert(Cost >= 0 && "TTI should not produce negative costs!");
564   return Cost;
565 }
566 
567 int TargetTransformInfo::getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
568                                                const APInt &Imm,
569                                                Type *Ty) const {
570   int Cost = TTIImpl->getIntImmCodeSizeCost(Opcode, Idx, Imm, Ty);
571   assert(Cost >= 0 && "TTI should not produce negative costs!");
572   return Cost;
573 }
574 
575 int TargetTransformInfo::getIntImmCost(const APInt &Imm, Type *Ty,
576                                        TTI::TargetCostKind CostKind) const {
577   int Cost = TTIImpl->getIntImmCost(Imm, Ty, CostKind);
578   assert(Cost >= 0 && "TTI should not produce negative costs!");
579   return Cost;
580 }
581 
582 int TargetTransformInfo::getIntImmCostInst(unsigned Opcode, unsigned Idx,
583                                            const APInt &Imm, Type *Ty,
584                                            TTI::TargetCostKind CostKind,
585                                            Instruction *Inst) const {
586   int Cost = TTIImpl->getIntImmCostInst(Opcode, Idx, Imm, Ty, CostKind, Inst);
587   assert(Cost >= 0 && "TTI should not produce negative costs!");
588   return Cost;
589 }
590 
591 int
592 TargetTransformInfo::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
593                                          const APInt &Imm, Type *Ty,
594                                          TTI::TargetCostKind CostKind) const {
595   int Cost = TTIImpl->getIntImmCostIntrin(IID, Idx, Imm, Ty, CostKind);
596   assert(Cost >= 0 && "TTI should not produce negative costs!");
597   return Cost;
598 }
599 
600 unsigned TargetTransformInfo::getNumberOfRegisters(unsigned ClassID) const {
601   return TTIImpl->getNumberOfRegisters(ClassID);
602 }
603 
604 unsigned TargetTransformInfo::getRegisterClassForType(bool Vector,
605                                                       Type *Ty) const {
606   return TTIImpl->getRegisterClassForType(Vector, Ty);
607 }
608 
609 const char *TargetTransformInfo::getRegisterClassName(unsigned ClassID) const {
610   return TTIImpl->getRegisterClassName(ClassID);
611 }
612 
613 unsigned TargetTransformInfo::getRegisterBitWidth(bool Vector) const {
614   return TTIImpl->getRegisterBitWidth(Vector);
615 }
616 
617 unsigned TargetTransformInfo::getMinVectorRegisterBitWidth() const {
618   return TTIImpl->getMinVectorRegisterBitWidth();
619 }
620 
621 bool TargetTransformInfo::shouldMaximizeVectorBandwidth(bool OptSize) const {
622   return TTIImpl->shouldMaximizeVectorBandwidth(OptSize);
623 }
624 
625 unsigned TargetTransformInfo::getMinimumVF(unsigned ElemWidth) const {
626   return TTIImpl->getMinimumVF(ElemWidth);
627 }
628 
629 bool TargetTransformInfo::shouldConsiderAddressTypePromotion(
630     const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const {
631   return TTIImpl->shouldConsiderAddressTypePromotion(
632       I, AllowPromotionWithoutCommonHeader);
633 }
634 
635 unsigned TargetTransformInfo::getCacheLineSize() const {
636   return TTIImpl->getCacheLineSize();
637 }
638 
639 llvm::Optional<unsigned>
640 TargetTransformInfo::getCacheSize(CacheLevel Level) const {
641   return TTIImpl->getCacheSize(Level);
642 }
643 
644 llvm::Optional<unsigned>
645 TargetTransformInfo::getCacheAssociativity(CacheLevel Level) const {
646   return TTIImpl->getCacheAssociativity(Level);
647 }
648 
649 unsigned TargetTransformInfo::getPrefetchDistance() const {
650   return TTIImpl->getPrefetchDistance();
651 }
652 
653 unsigned TargetTransformInfo::getMinPrefetchStride(
654     unsigned NumMemAccesses, unsigned NumStridedMemAccesses,
655     unsigned NumPrefetches, bool HasCall) const {
656   return TTIImpl->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
657                                        NumPrefetches, HasCall);
658 }
659 
660 unsigned TargetTransformInfo::getMaxPrefetchIterationsAhead() const {
661   return TTIImpl->getMaxPrefetchIterationsAhead();
662 }
663 
664 bool TargetTransformInfo::enableWritePrefetching() const {
665   return TTIImpl->enableWritePrefetching();
666 }
667 
668 unsigned TargetTransformInfo::getMaxInterleaveFactor(unsigned VF) const {
669   return TTIImpl->getMaxInterleaveFactor(VF);
670 }
671 
672 TargetTransformInfo::OperandValueKind
673 TargetTransformInfo::getOperandInfo(const Value *V,
674                                     OperandValueProperties &OpProps) {
675   OperandValueKind OpInfo = OK_AnyValue;
676   OpProps = OP_None;
677 
678   if (const auto *CI = dyn_cast<ConstantInt>(V)) {
679     if (CI->getValue().isPowerOf2())
680       OpProps = OP_PowerOf2;
681     return OK_UniformConstantValue;
682   }
683 
684   // A broadcast shuffle creates a uniform value.
685   // TODO: Add support for non-zero index broadcasts.
686   // TODO: Add support for different source vector width.
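  // A minimal IR sketch of such a broadcast (hypothetical values); the mask
  // reads only element 0, so every lane holds the same value:
  //   %splat = shufflevector <4 x i32> %v, <4 x i32> undef,
  //                          <4 x i32> zeroinitializer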
687   if (const auto *ShuffleInst = dyn_cast<ShuffleVectorInst>(V))
688     if (ShuffleInst->isZeroEltSplat())
689       OpInfo = OK_UniformValue;
690 
691   const Value *Splat = getSplatValue(V);
692 
693   // Check for a splat of a constant or for a non-uniform vector of constants
694   // and check if the constant(s) are all powers of two.
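  // Hedged examples (hypothetical constants):
  //   <i32 8, i32 8, i32 8, i32 8>  -> OK_UniformConstantValue, OP_PowerOf2
  //   <i32 1, i32 2, i32 4, i32 8>  -> OK_NonUniformConstantValue, OP_PowerOf2
  //   <i32 3, i32 5, i32 6, i32 7>  -> OK_NonUniformConstantValue, OP_None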
695   if (isa<ConstantVector>(V) || isa<ConstantDataVector>(V)) {
696     OpInfo = OK_NonUniformConstantValue;
697     if (Splat) {
698       OpInfo = OK_UniformConstantValue;
699       if (auto *CI = dyn_cast<ConstantInt>(Splat))
700         if (CI->getValue().isPowerOf2())
701           OpProps = OP_PowerOf2;
702     } else if (const auto *CDS = dyn_cast<ConstantDataSequential>(V)) {
703       OpProps = OP_PowerOf2;
704       for (unsigned I = 0, E = CDS->getNumElements(); I != E; ++I) {
705         if (auto *CI = dyn_cast<ConstantInt>(CDS->getElementAsConstant(I)))
706           if (CI->getValue().isPowerOf2())
707             continue;
708         OpProps = OP_None;
709         break;
710       }
711     }
712   }
713 
714   // Check for a splat of a uniform value. This is not loop aware, so return
715   // true only for the obviously uniform cases (argument, global value).
716   if (Splat && (isa<Argument>(Splat) || isa<GlobalValue>(Splat)))
717     OpInfo = OK_UniformValue;
718 
719   return OpInfo;
720 }
721 
722 int TargetTransformInfo::getArithmeticInstrCost(
723     unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
724     OperandValueKind Opd1Info,
725     OperandValueKind Opd2Info, OperandValueProperties Opd1PropInfo,
726     OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
727     const Instruction *CxtI) const {
728   int Cost = TTIImpl->getArithmeticInstrCost(
729       Opcode, Ty, CostKind, Opd1Info, Opd2Info, Opd1PropInfo, Opd2PropInfo,
730       Args, CxtI);
731   assert(Cost >= 0 && "TTI should not produce negative costs!");
732   return Cost;
733 }
734 
735 int TargetTransformInfo::getShuffleCost(ShuffleKind Kind, VectorType *Ty,
736                                         int Index, VectorType *SubTp) const {
737   int Cost = TTIImpl->getShuffleCost(Kind, Ty, Index, SubTp);
738   assert(Cost >= 0 && "TTI should not produce negative costs!");
739   return Cost;
740 }
741 
742 TTI::CastContextHint
743 TargetTransformInfo::getCastContextHint(const Instruction *I) {
744   if (!I)
745     return CastContextHint::None;
746 
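  // A hedged IR sketch of the classification performed below (hypothetical
  // values): an extend fed by a plain load is Normal, while one fed by a
  // masked load intrinsic is Masked.
  //   %l  = load i8, i8* %p
  //   %z  = zext i8 %l to i32                               ; Normal
  //   %ml = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(
  //             <4 x i8>* %q, i32 1, <4 x i1> %m, <4 x i8> undef)
  //   %zv = zext <4 x i8> %ml to <4 x i32>                  ; Masked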
747   auto getLoadStoreKind = [](const Value *V, unsigned LdStOp, unsigned MaskedOp,
748                              unsigned GatScatOp) {
749     const Instruction *I = dyn_cast<Instruction>(V);
750     if (!I)
751       return CastContextHint::None;
752 
753     if (I->getOpcode() == LdStOp)
754       return CastContextHint::Normal;
755 
756     if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
757       if (II->getIntrinsicID() == MaskedOp)
758         return TTI::CastContextHint::Masked;
759       if (II->getIntrinsicID() == GatScatOp)
760         return TTI::CastContextHint::GatherScatter;
761     }
762 
763     return TTI::CastContextHint::None;
764   };
765 
766   switch (I->getOpcode()) {
767   case Instruction::ZExt:
768   case Instruction::SExt:
769   case Instruction::FPExt:
770     return getLoadStoreKind(I->getOperand(0), Instruction::Load,
771                             Intrinsic::masked_load, Intrinsic::masked_gather);
772   case Instruction::Trunc:
773   case Instruction::FPTrunc:
774     if (I->hasOneUse())
775       return getLoadStoreKind(*I->user_begin(), Instruction::Store,
776                               Intrinsic::masked_store,
777                               Intrinsic::masked_scatter);
778     break;
779   default:
780     return CastContextHint::None;
781   }
782 
783   return TTI::CastContextHint::None;
784 }
785 
786 int TargetTransformInfo::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
787                                           CastContextHint CCH,
788                                           TTI::TargetCostKind CostKind,
789                                           const Instruction *I) const {
790   assert((I == nullptr || I->getOpcode() == Opcode) &&
791          "Opcode should reflect passed instruction.");
792   int Cost = TTIImpl->getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
793   assert(Cost >= 0 && "TTI should not produce negative costs!");
794   return Cost;
795 }
796 
797 int TargetTransformInfo::getExtractWithExtendCost(unsigned Opcode, Type *Dst,
798                                                   VectorType *VecTy,
799                                                   unsigned Index) const {
800   int Cost = TTIImpl->getExtractWithExtendCost(Opcode, Dst, VecTy, Index);
801   assert(Cost >= 0 && "TTI should not produce negative costs!");
802   return Cost;
803 }
804 
805 int TargetTransformInfo::getCFInstrCost(unsigned Opcode,
806                                         TTI::TargetCostKind CostKind) const {
807   int Cost = TTIImpl->getCFInstrCost(Opcode, CostKind);
808   assert(Cost >= 0 && "TTI should not produce negative costs!");
809   return Cost;
810 }
811 
812 int TargetTransformInfo::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
813                                             Type *CondTy,
814                                             CmpInst::Predicate VecPred,
815                                             TTI::TargetCostKind CostKind,
816                                             const Instruction *I) const {
817   assert((I == nullptr || I->getOpcode() == Opcode) &&
818          "Opcode should reflect passed instruction.");
819   int Cost =
820       TTIImpl->getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
821   assert(Cost >= 0 && "TTI should not produce negative costs!");
822   return Cost;
823 }
824 
825 int TargetTransformInfo::getVectorInstrCost(unsigned Opcode, Type *Val,
826                                             unsigned Index) const {
827   int Cost = TTIImpl->getVectorInstrCost(Opcode, Val, Index);
828   assert(Cost >= 0 && "TTI should not produce negative costs!");
829   return Cost;
830 }
831 
832 int TargetTransformInfo::getMemoryOpCost(unsigned Opcode, Type *Src,
833                                          Align Alignment, unsigned AddressSpace,
834                                          TTI::TargetCostKind CostKind,
835                                          const Instruction *I) const {
836   assert((I == nullptr || I->getOpcode() == Opcode) &&
837          "Opcode should reflect passed instruction.");
838   int Cost = TTIImpl->getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
839                                       CostKind, I);
840   assert(Cost >= 0 && "TTI should not produce negative costs!");
841   return Cost;
842 }
843 
844 int TargetTransformInfo::getMaskedMemoryOpCost(
845     unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
846     TTI::TargetCostKind CostKind) const {
847   int Cost =
848       TTIImpl->getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
849                                      CostKind);
850   assert(Cost >= 0 && "TTI should not produce negative costs!");
851   return Cost;
852 }
853 
854 int TargetTransformInfo::getGatherScatterOpCost(
855     unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
856     Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) const {
857   int Cost = TTIImpl->getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
858                                              Alignment, CostKind, I);
859   assert(Cost >= 0 && "TTI should not produce negative costs!");
860   return Cost;
861 }
862 
863 int TargetTransformInfo::getInterleavedMemoryOpCost(
864     unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
865     Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
866     bool UseMaskForCond, bool UseMaskForGaps) const {
867   int Cost = TTIImpl->getInterleavedMemoryOpCost(
868       Opcode, VecTy, Factor, Indices, Alignment, AddressSpace, CostKind,
869       UseMaskForCond, UseMaskForGaps);
870   assert(Cost >= 0 && "TTI should not produce negative costs!");
871   return Cost;
872 }
873 
874 int
875 TargetTransformInfo::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
876                                            TTI::TargetCostKind CostKind) const {
877   int Cost = TTIImpl->getIntrinsicInstrCost(ICA, CostKind);
878   assert(Cost >= 0 && "TTI should not produce negative costs!");
879   return Cost;
880 }
881 
882 int TargetTransformInfo::getCallInstrCost(Function *F, Type *RetTy,
883                                           ArrayRef<Type *> Tys,
884                                           TTI::TargetCostKind CostKind) const {
885   int Cost = TTIImpl->getCallInstrCost(F, RetTy, Tys, CostKind);
886   assert(Cost >= 0 && "TTI should not produce negative costs!");
887   return Cost;
888 }
889 
890 unsigned TargetTransformInfo::getNumberOfParts(Type *Tp) const {
891   return TTIImpl->getNumberOfParts(Tp);
892 }
893 
894 int TargetTransformInfo::getAddressComputationCost(Type *Tp,
895                                                    ScalarEvolution *SE,
896                                                    const SCEV *Ptr) const {
897   int Cost = TTIImpl->getAddressComputationCost(Tp, SE, Ptr);
898   assert(Cost >= 0 && "TTI should not produce negative costs!");
899   return Cost;
900 }
901 
902 int TargetTransformInfo::getMemcpyCost(const Instruction *I) const {
903   int Cost = TTIImpl->getMemcpyCost(I);
904   assert(Cost >= 0 && "TTI should not produce negative costs!");
905   return Cost;
906 }
907 
908 int TargetTransformInfo::getArithmeticReductionCost(unsigned Opcode,
909                                                     VectorType *Ty,
910                                                     bool IsPairwiseForm,
911                                                     TTI::TargetCostKind CostKind) const {
912   int Cost = TTIImpl->getArithmeticReductionCost(Opcode, Ty, IsPairwiseForm,
913                                                  CostKind);
914   assert(Cost >= 0 && "TTI should not produce negative costs!");
915   return Cost;
916 }
917 
918 int TargetTransformInfo::getMinMaxReductionCost(
919     VectorType *Ty, VectorType *CondTy, bool IsPairwiseForm, bool IsUnsigned,
920     TTI::TargetCostKind CostKind) const {
921   int Cost =
922       TTIImpl->getMinMaxReductionCost(Ty, CondTy, IsPairwiseForm, IsUnsigned,
923                                       CostKind);
924   assert(Cost >= 0 && "TTI should not produce negative costs!");
925   return Cost;
926 }
927 
928 unsigned
929 TargetTransformInfo::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const {
930   return TTIImpl->getCostOfKeepingLiveOverCall(Tys);
931 }
932 
933 bool TargetTransformInfo::getTgtMemIntrinsic(IntrinsicInst *Inst,
934                                              MemIntrinsicInfo &Info) const {
935   return TTIImpl->getTgtMemIntrinsic(Inst, Info);
936 }
937 
938 unsigned TargetTransformInfo::getAtomicMemIntrinsicMaxElementSize() const {
939   return TTIImpl->getAtomicMemIntrinsicMaxElementSize();
940 }
941 
942 Value *TargetTransformInfo::getOrCreateResultFromMemIntrinsic(
943     IntrinsicInst *Inst, Type *ExpectedType) const {
944   return TTIImpl->getOrCreateResultFromMemIntrinsic(Inst, ExpectedType);
945 }
946 
947 Type *TargetTransformInfo::getMemcpyLoopLoweringType(
948     LLVMContext &Context, Value *Length, unsigned SrcAddrSpace,
949     unsigned DestAddrSpace, unsigned SrcAlign, unsigned DestAlign) const {
950   return TTIImpl->getMemcpyLoopLoweringType(Context, Length, SrcAddrSpace,
951                                             DestAddrSpace, SrcAlign, DestAlign);
952 }
953 
954 void TargetTransformInfo::getMemcpyLoopResidualLoweringType(
955     SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
956     unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
957     unsigned SrcAlign, unsigned DestAlign) const {
958   TTIImpl->getMemcpyLoopResidualLoweringType(OpsOut, Context, RemainingBytes,
959                                              SrcAddrSpace, DestAddrSpace,
960                                              SrcAlign, DestAlign);
961 }
962 
963 bool TargetTransformInfo::areInlineCompatible(const Function *Caller,
964                                               const Function *Callee) const {
965   return TTIImpl->areInlineCompatible(Caller, Callee);
966 }
967 
968 bool TargetTransformInfo::areFunctionArgsABICompatible(
969     const Function *Caller, const Function *Callee,
970     SmallPtrSetImpl<Argument *> &Args) const {
971   return TTIImpl->areFunctionArgsABICompatible(Caller, Callee, Args);
972 }
973 
974 bool TargetTransformInfo::isIndexedLoadLegal(MemIndexedMode Mode,
975                                              Type *Ty) const {
976   return TTIImpl->isIndexedLoadLegal(Mode, Ty);
977 }
978 
979 bool TargetTransformInfo::isIndexedStoreLegal(MemIndexedMode Mode,
980                                               Type *Ty) const {
981   return TTIImpl->isIndexedStoreLegal(Mode, Ty);
982 }
983 
984 unsigned TargetTransformInfo::getLoadStoreVecRegBitWidth(unsigned AS) const {
985   return TTIImpl->getLoadStoreVecRegBitWidth(AS);
986 }
987 
988 bool TargetTransformInfo::isLegalToVectorizeLoad(LoadInst *LI) const {
989   return TTIImpl->isLegalToVectorizeLoad(LI);
990 }
991 
992 bool TargetTransformInfo::isLegalToVectorizeStore(StoreInst *SI) const {
993   return TTIImpl->isLegalToVectorizeStore(SI);
994 }
995 
996 bool TargetTransformInfo::isLegalToVectorizeLoadChain(
997     unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const {
998   return TTIImpl->isLegalToVectorizeLoadChain(ChainSizeInBytes, Alignment,
999                                               AddrSpace);
1000 }
1001 
1002 bool TargetTransformInfo::isLegalToVectorizeStoreChain(
1003     unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const {
1004   return TTIImpl->isLegalToVectorizeStoreChain(ChainSizeInBytes, Alignment,
1005                                                AddrSpace);
1006 }
1007 
1008 unsigned TargetTransformInfo::getLoadVectorFactor(unsigned VF,
1009                                                   unsigned LoadSize,
1010                                                   unsigned ChainSizeInBytes,
1011                                                   VectorType *VecTy) const {
1012   return TTIImpl->getLoadVectorFactor(VF, LoadSize, ChainSizeInBytes, VecTy);
1013 }
1014 
1015 unsigned TargetTransformInfo::getStoreVectorFactor(unsigned VF,
1016                                                    unsigned StoreSize,
1017                                                    unsigned ChainSizeInBytes,
1018                                                    VectorType *VecTy) const {
1019   return TTIImpl->getStoreVectorFactor(VF, StoreSize, ChainSizeInBytes, VecTy);
1020 }
1021 
1022 bool TargetTransformInfo::useReductionIntrinsic(unsigned Opcode, Type *Ty,
1023                                                 ReductionFlags Flags) const {
1024   return TTIImpl->useReductionIntrinsic(Opcode, Ty, Flags);
1025 }
1026 
1027 bool TargetTransformInfo::preferInLoopReduction(unsigned Opcode, Type *Ty,
1028                                                 ReductionFlags Flags) const {
1029   return TTIImpl->preferInLoopReduction(Opcode, Ty, Flags);
1030 }
1031 
1032 bool TargetTransformInfo::preferPredicatedReductionSelect(
1033     unsigned Opcode, Type *Ty, ReductionFlags Flags) const {
1034   return TTIImpl->preferPredicatedReductionSelect(Opcode, Ty, Flags);
1035 }
1036 
1037 bool TargetTransformInfo::shouldExpandReduction(const IntrinsicInst *II) const {
1038   return TTIImpl->shouldExpandReduction(II);
1039 }
1040 
1041 unsigned TargetTransformInfo::getGISelRematGlobalCost() const {
1042   return TTIImpl->getGISelRematGlobalCost();
1043 }
1044 
1045 int TargetTransformInfo::getInstructionLatency(const Instruction *I) const {
1046   return TTIImpl->getInstructionLatency(I);
1047 }
1048 
1049 static bool matchPairwiseShuffleMask(ShuffleVectorInst *SI, bool IsLeft,
1050                                      unsigned Level) {
1051   // We don't need a shuffle if we just want to have element 0 in position 0 of
1052   // the vector.
1053   if (!SI && Level == 0 && IsLeft)
1054     return true;
1055   else if (!SI)
1056     return false;
1057 
1058   SmallVector<int, 32> Mask(
1059       cast<FixedVectorType>(SI->getType())->getNumElements(), -1);
1060 
1061   // Build a mask of 0, 2, ... (left) or 1, 3, ... (right) depending on whether
1062   // we look at the left or right side.
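  // E.g. (hedged), for Level == 1 the expected left mask is <0, 2, -1, -1, ...>
  // and the expected right mask is <1, 3, -1, -1, ...>, with -1 meaning undef.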
1063   for (unsigned i = 0, e = (1 << Level), val = !IsLeft; i != e; ++i, val += 2)
1064     Mask[i] = val;
1065 
1066   ArrayRef<int> ActualMask = SI->getShuffleMask();
1067   return Mask == ActualMask;
1068 }
1069 
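// Matches a single reduction step: either a vector binary operator or a
// compare+select min/max idiom. A hedged IR sketch of the select form
// (hypothetical values):
//   %c = icmp sgt <4 x i32> %a, %b
//   %m = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %b   ; smax -> RK_MinMax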
1070 static Optional<TTI::ReductionData> getReductionData(Instruction *I) {
1071   Value *L, *R;
1072   if (m_BinOp(m_Value(L), m_Value(R)).match(I))
1073     return TTI::ReductionData(TTI::RK_Arithmetic, I->getOpcode(), L, R);
1074   if (auto *SI = dyn_cast<SelectInst>(I)) {
1075     if (m_SMin(m_Value(L), m_Value(R)).match(SI) ||
1076         m_SMax(m_Value(L), m_Value(R)).match(SI) ||
1077         m_OrdFMin(m_Value(L), m_Value(R)).match(SI) ||
1078         m_OrdFMax(m_Value(L), m_Value(R)).match(SI) ||
1079         m_UnordFMin(m_Value(L), m_Value(R)).match(SI) ||
1080         m_UnordFMax(m_Value(L), m_Value(R)).match(SI)) {
1081       auto *CI = cast<CmpInst>(SI->getCondition());
1082       return TTI::ReductionData(TTI::RK_MinMax, CI->getOpcode(), L, R);
1083     }
1084     if (m_UMin(m_Value(L), m_Value(R)).match(SI) ||
1085         m_UMax(m_Value(L), m_Value(R)).match(SI)) {
1086       auto *CI = cast<CmpInst>(SI->getCondition());
1087       return TTI::ReductionData(TTI::RK_UnsignedMinMax, CI->getOpcode(), L, R);
1088     }
1089   }
1090   return llvm::None;
1091 }
1092 
1093 static TTI::ReductionKind matchPairwiseReductionAtLevel(Instruction *I,
1094                                                         unsigned Level,
1095                                                         unsigned NumLevels) {
1096   // Match one level of pairwise operations.
1097   // %rdx.shuf.0.0 = shufflevector <4 x float> %rdx, <4 x float> undef,
1098   //       <4 x i32> <i32 0, i32 2 , i32 undef, i32 undef>
1099   // %rdx.shuf.0.1 = shufflevector <4 x float> %rdx, <4 x float> undef,
1100   //       <4 x i32> <i32 1, i32 3, i32 undef, i32 undef>
1101   // %bin.rdx.0 = fadd <4 x float> %rdx.shuf.0.0, %rdx.shuf.0.1
1102   if (!I)
1103     return TTI::RK_None;
1104 
1105   assert(I->getType()->isVectorTy() && "Expecting a vector type");
1106 
1107   Optional<TTI::ReductionData> RD = getReductionData(I);
1108   if (!RD)
1109     return TTI::RK_None;
1110 
1111   ShuffleVectorInst *LS = dyn_cast<ShuffleVectorInst>(RD->LHS);
1112   if (!LS && Level)
1113     return TTI::RK_None;
1114   ShuffleVectorInst *RS = dyn_cast<ShuffleVectorInst>(RD->RHS);
1115   if (!RS && Level)
1116     return TTI::RK_None;
1117 
1118   // On level 0 we can omit one shufflevector instruction.
1119   if (!Level && !RS && !LS)
1120     return TTI::RK_None;
1121 
1122   // Shuffle inputs must match.
1123   Value *NextLevelOpL = LS ? LS->getOperand(0) : nullptr;
1124   Value *NextLevelOpR = RS ? RS->getOperand(0) : nullptr;
1125   Value *NextLevelOp = nullptr;
1126   if (NextLevelOpR && NextLevelOpL) {
1127     // If we have two shuffles their operands must match.
1128     if (NextLevelOpL != NextLevelOpR)
1129       return TTI::RK_None;
1130 
1131     NextLevelOp = NextLevelOpL;
1132   } else if (Level == 0 && (NextLevelOpR || NextLevelOpL)) {
1133     // On the first level we can omit the shufflevector <0, undef,...>. So the
1134     // input to the other shufflevector <1, undef> must match one of the
1135     // inputs to the current binary operation.
1136     // Example:
1137     //  %NextLevelOpL = shufflevector %R, <1, undef ...>
1138     //  %BinOp        = fadd          %NextLevelOpL, %R
1139     if (NextLevelOpL && NextLevelOpL != RD->RHS)
1140       return TTI::RK_None;
1141     else if (NextLevelOpR && NextLevelOpR != RD->LHS)
1142       return TTI::RK_None;
1143 
1144     NextLevelOp = NextLevelOpL ? RD->RHS : RD->LHS;
1145   } else
1146     return TTI::RK_None;
1147 
1148   // Check that the next level's binary operation exists and matches the
1149   // current one.
1150   if (Level + 1 != NumLevels) {
1151     if (!isa<Instruction>(NextLevelOp))
1152       return TTI::RK_None;
1153     Optional<TTI::ReductionData> NextLevelRD =
1154         getReductionData(cast<Instruction>(NextLevelOp));
1155     if (!NextLevelRD || !RD->hasSameData(*NextLevelRD))
1156       return TTI::RK_None;
1157   }
1158 
1159   // The shuffle masks for the pairwise operation must match.
1160   if (matchPairwiseShuffleMask(LS, /*IsLeft=*/true, Level)) {
1161     if (!matchPairwiseShuffleMask(RS, /*IsLeft=*/false, Level))
1162       return TTI::RK_None;
1163   } else if (matchPairwiseShuffleMask(RS, /*IsLeft=*/true, Level)) {
1164     if (!matchPairwiseShuffleMask(LS, /*IsLeft=*/false, Level))
1165       return TTI::RK_None;
1166   } else {
1167     return TTI::RK_None;
1168   }
1169 
1170   if (++Level == NumLevels)
1171     return RD->Kind;
1172 
1173   // Match next level.
1174   return matchPairwiseReductionAtLevel(dyn_cast<Instruction>(NextLevelOp), Level,
1175                                        NumLevels);
1176 }
1177 
1178 TTI::ReductionKind TTI::matchPairwiseReduction(
1179   const ExtractElementInst *ReduxRoot, unsigned &Opcode, VectorType *&Ty) {
1180   if (!EnableReduxCost)
1181     return TTI::RK_None;
1182 
1183   // Need to extract the first element.
1184   ConstantInt *CI = dyn_cast<ConstantInt>(ReduxRoot->getOperand(1));
1185   unsigned Idx = ~0u;
1186   if (CI)
1187     Idx = CI->getZExtValue();
1188   if (Idx != 0)
1189     return TTI::RK_None;
1190 
1191   auto *RdxStart = dyn_cast<Instruction>(ReduxRoot->getOperand(0));
1192   if (!RdxStart)
1193     return TTI::RK_None;
1194   Optional<TTI::ReductionData> RD = getReductionData(RdxStart);
1195   if (!RD)
1196     return TTI::RK_None;
1197 
1198   auto *VecTy = cast<FixedVectorType>(RdxStart->getType());
1199   unsigned NumVecElems = VecTy->getNumElements();
1200   if (!isPowerOf2_32(NumVecElems))
1201     return TTI::RK_None;
1202 
1203   // We look for a sequence of shuffle, shuffle, add triples like the following
1204   // that builds a pairwise reduction tree.
1205   //
1206   //  (X0, X1, X2, X3)
1207   //   (X0 + X1, X2 + X3, undef, undef)
1208   //    ((X0 + X1) + (X2 + X3), undef, undef, undef)
1209   //
1210   // %rdx.shuf.0.0 = shufflevector <4 x float> %rdx, <4 x float> undef,
1211   //       <4 x i32> <i32 0, i32 2 , i32 undef, i32 undef>
1212   // %rdx.shuf.0.1 = shufflevector <4 x float> %rdx, <4 x float> undef,
1213   //       <4 x i32> <i32 1, i32 3, i32 undef, i32 undef>
1214   // %bin.rdx.0 = fadd <4 x float> %rdx.shuf.0.0, %rdx.shuf.0.1
1215   // %rdx.shuf.1.0 = shufflevector <4 x float> %bin.rdx.0, <4 x float> undef,
1216   //       <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
1217   // %rdx.shuf.1.1 = shufflevector <4 x float> %bin.rdx.0, <4 x float> undef,
1218   //       <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
1219   // %bin.rdx8 = fadd <4 x float> %rdx.shuf.1.0, %rdx.shuf.1.1
1220   // %r = extractelement <4 x float> %bin.rdx8, i32 0
1221   if (matchPairwiseReductionAtLevel(RdxStart, 0, Log2_32(NumVecElems)) ==
1222       TTI::RK_None)
1223     return TTI::RK_None;
1224 
1225   Opcode = RD->Opcode;
1226   Ty = VecTy;
1227 
1228   return RD->Kind;
1229 }
1230 
1231 static std::pair<Value *, ShuffleVectorInst *>
1232 getShuffleAndOtherOprd(Value *L, Value *R) {
1233   ShuffleVectorInst *S = nullptr;
1234 
1235   if ((S = dyn_cast<ShuffleVectorInst>(L)))
1236     return std::make_pair(R, S);
1237 
1238   S = dyn_cast<ShuffleVectorInst>(R);
1239   return std::make_pair(L, S);
1240 }
1241 
1242 TTI::ReductionKind TTI::matchVectorSplittingReduction(
1243   const ExtractElementInst *ReduxRoot, unsigned &Opcode, VectorType *&Ty) {
1244 
1245   if (!EnableReduxCost)
1246     return TTI::RK_None;
1247 
1248   // Need to extract the first element.
1249   ConstantInt *CI = dyn_cast<ConstantInt>(ReduxRoot->getOperand(1));
1250   unsigned Idx = ~0u;
1251   if (CI)
1252     Idx = CI->getZExtValue();
1253   if (Idx != 0)
1254     return TTI::RK_None;
1255 
1256   auto *RdxStart = dyn_cast<Instruction>(ReduxRoot->getOperand(0));
1257   if (!RdxStart)
1258     return TTI::RK_None;
1259   Optional<TTI::ReductionData> RD = getReductionData(RdxStart);
1260   if (!RD)
1261     return TTI::RK_None;
1262 
1263   auto *VecTy = cast<FixedVectorType>(ReduxRoot->getOperand(0)->getType());
1264   unsigned NumVecElems = VecTy->getNumElements();
1265   if (!isPowerOf2_32(NumVecElems))
1266     return TTI::RK_None;
1267 
1268   // We look for a sequence of shuffles and adds like the following, matching
1269   // one fadd/shufflevector pair at a time.
1270   //
1271   // %rdx.shuf = shufflevector <4 x float> %rdx, <4 x float> undef,
1272   //                           <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
1273   // %bin.rdx = fadd <4 x float> %rdx, %rdx.shuf
1274   // %rdx.shuf7 = shufflevector <4 x float> %bin.rdx, <4 x float> undef,
1275   //                          <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
1276   // %bin.rdx8 = fadd <4 x float> %bin.rdx, %rdx.shuf7
1277   // %r = extractelement <4 x float> %bin.rdx8, i32 0
1278 
1279   unsigned MaskStart = 1;
1280   Instruction *RdxOp = RdxStart;
1281   SmallVector<int, 32> ShuffleMask(NumVecElems, 0);
1282   unsigned NumVecElemsRemain = NumVecElems;
1283   while (NumVecElemsRemain - 1) {
1284     // Check for the right reduction operation.
1285     if (!RdxOp)
1286       return TTI::RK_None;
1287     Optional<TTI::ReductionData> RDLevel = getReductionData(RdxOp);
1288     if (!RDLevel || !RDLevel->hasSameData(*RD))
1289       return TTI::RK_None;
1290 
1291     Value *NextRdxOp;
1292     ShuffleVectorInst *Shuffle;
1293     std::tie(NextRdxOp, Shuffle) =
1294         getShuffleAndOtherOprd(RDLevel->LHS, RDLevel->RHS);
1295 
1296     // Check that the reduction operation and the shuffle use the same value.
1297     if (Shuffle == nullptr)
1298       return TTI::RK_None;
1299     if (Shuffle->getOperand(0) != NextRdxOp)
1300       return TTI::RK_None;
1301 
1302     // Check that the shuffle masks match.
1303     for (unsigned j = 0; j != MaskStart; ++j)
1304       ShuffleMask[j] = MaskStart + j;
1305     // Fill the rest of the mask with -1 for undef.
1306     std::fill(&ShuffleMask[MaskStart], ShuffleMask.end(), -1);
1307 
1308     ArrayRef<int> Mask = Shuffle->getShuffleMask();
1309     if (ShuffleMask != Mask)
1310       return TTI::RK_None;
1311 
1312     RdxOp = dyn_cast<Instruction>(NextRdxOp);
1313     NumVecElemsRemain /= 2;
1314     MaskStart *= 2;
1315   }
1316 
1317   Opcode = RD->Opcode;
1318   Ty = VecTy;
1319   return RD->Kind;
1320 }
1321 
1322 TTI::ReductionKind
1323 TTI::matchVectorReduction(const ExtractElementInst *Root, unsigned &Opcode,
1324                           VectorType *&Ty, bool &IsPairwise) {
1325   TTI::ReductionKind RdxKind = matchVectorSplittingReduction(Root, Opcode, Ty);
1326   if (RdxKind != TTI::ReductionKind::RK_None) {
1327     IsPairwise = false;
1328     return RdxKind;
1329   }
1330   IsPairwise = true;
1331   return matchPairwiseReduction(Root, Opcode, Ty);
1332 }
1333 
1334 int TargetTransformInfo::getInstructionThroughput(const Instruction *I) const {
1335   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
1336 
1337   switch (I->getOpcode()) {
1338   case Instruction::GetElementPtr:
1339   case Instruction::Ret:
1340   case Instruction::PHI:
1341   case Instruction::Br:
1342   case Instruction::Add:
1343   case Instruction::FAdd:
1344   case Instruction::Sub:
1345   case Instruction::FSub:
1346   case Instruction::Mul:
1347   case Instruction::FMul:
1348   case Instruction::UDiv:
1349   case Instruction::SDiv:
1350   case Instruction::FDiv:
1351   case Instruction::URem:
1352   case Instruction::SRem:
1353   case Instruction::FRem:
1354   case Instruction::Shl:
1355   case Instruction::LShr:
1356   case Instruction::AShr:
1357   case Instruction::And:
1358   case Instruction::Or:
1359   case Instruction::Xor:
1360   case Instruction::FNeg:
1361   case Instruction::Select:
1362   case Instruction::ICmp:
1363   case Instruction::FCmp:
1364   case Instruction::Store:
1365   case Instruction::Load:
1366   case Instruction::ZExt:
1367   case Instruction::SExt:
1368   case Instruction::FPToUI:
1369   case Instruction::FPToSI:
1370   case Instruction::FPExt:
1371   case Instruction::PtrToInt:
1372   case Instruction::IntToPtr:
1373   case Instruction::SIToFP:
1374   case Instruction::UIToFP:
1375   case Instruction::Trunc:
1376   case Instruction::FPTrunc:
1377   case Instruction::BitCast:
1378   case Instruction::AddrSpaceCast:
1379   case Instruction::ExtractElement:
1380   case Instruction::InsertElement:
1381   case Instruction::ExtractValue:
1382   case Instruction::ShuffleVector:
1383   case Instruction::Call:
1384     return getUserCost(I, CostKind);
1385   default:
1386     // We don't have any information on this instruction.
1387     return -1;
1388   }
1389 }
1390 
1391 TargetTransformInfo::Concept::~Concept() {}
1392 
1393 TargetIRAnalysis::TargetIRAnalysis() : TTICallback(&getDefaultTTI) {}
1394 
1395 TargetIRAnalysis::TargetIRAnalysis(
1396     std::function<Result(const Function &)> TTICallback)
1397     : TTICallback(std::move(TTICallback)) {}
1398 
1399 TargetIRAnalysis::Result TargetIRAnalysis::run(const Function &F,
1400                                                FunctionAnalysisManager &) {
1401   return TTICallback(F);
1402 }
1403 
1404 AnalysisKey TargetIRAnalysis::Key;
1405 
1406 TargetIRAnalysis::Result TargetIRAnalysis::getDefaultTTI(const Function &F) {
1407   return Result(F.getParent()->getDataLayout());
1408 }
1409 
1410 // Register the basic pass.
1411 INITIALIZE_PASS(TargetTransformInfoWrapperPass, "tti",
1412                 "Target Transform Information", false, true)
1413 char TargetTransformInfoWrapperPass::ID = 0;
1414 
1415 void TargetTransformInfoWrapperPass::anchor() {}
1416 
1417 TargetTransformInfoWrapperPass::TargetTransformInfoWrapperPass()
1418     : ImmutablePass(ID) {
1419   initializeTargetTransformInfoWrapperPassPass(
1420       *PassRegistry::getPassRegistry());
1421 }
1422 
1423 TargetTransformInfoWrapperPass::TargetTransformInfoWrapperPass(
1424     TargetIRAnalysis TIRA)
1425     : ImmutablePass(ID), TIRA(std::move(TIRA)) {
1426   initializeTargetTransformInfoWrapperPassPass(
1427       *PassRegistry::getPassRegistry());
1428 }
1429 
1430 TargetTransformInfo &TargetTransformInfoWrapperPass::getTTI(const Function &F) {
1431   FunctionAnalysisManager DummyFAM;
1432   TTI = TIRA.run(F, DummyFAM);
1433   return *TTI;
1434 }
1435 
1436 ImmutablePass *
1437 llvm::createTargetTransformInfoWrapperPass(TargetIRAnalysis TIRA) {
1438   return new TargetTransformInfoWrapperPass(std::move(TIRA));
1439 }
1440