//===- llvm/Analysis/TargetTransformInfo.cpp ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/TargetTransformInfoImpl.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include <utility>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "tti"

static cl::opt<bool> EnableReduxCost("costmodel-reduxcost", cl::init(false),
                                     cl::Hidden,
                                     cl::desc("Recognize reduction patterns."));
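
// This hidden flag gates the reduction-pattern matchers below
// (matchPairwiseReduction and matchVectorSplittingReduction). It can be
// flipped from any tool that parses LLVM's command-line options; a
// hypothetical invocation sketch:
//   opt -costmodel-reduxcost -passes=... input.ll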

namespace {
/// No-op implementation of the TTI interface using the utility base
/// classes.
///
/// This is used when no target-specific information is available.
struct NoTTIImpl : TargetTransformInfoImplCRTPBase<NoTTIImpl> {
  explicit NoTTIImpl(const DataLayout &DL)
      : TargetTransformInfoImplCRTPBase<NoTTIImpl>(DL) {}
};
} // namespace

bool HardwareLoopInfo::canAnalyze(LoopInfo &LI) {
  // If the loop has irreducible control flow, it cannot be converted to a
  // hardware loop.
  LoopBlocksRPO RPOT(L);
  RPOT.perform(&LI);
  if (containsIrreducibleCFG<const BasicBlock *>(RPOT, LI))
    return false;
  return true;
}
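
// An irreducible CFG contains a cycle that can be entered at more than one
// block, so it has no unique loop header. A minimal illustrative IR sketch
// (hypothetical labels):
//   entry: br i1 %c, label %a, label %b
//   a:     br label %b
//   b:     br label %a   ; the {a, b} cycle is entered at both %a and %b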

IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id,
                                                 const CallBase &CI,
                                                 unsigned ScalarizationCost)
    : II(dyn_cast<IntrinsicInst>(&CI)), RetTy(CI.getType()), IID(Id),
      ScalarizationCost(ScalarizationCost) {

  if (const auto *FPMO = dyn_cast<FPMathOperator>(&CI))
    FMF = FPMO->getFastMathFlags();

  Arguments.insert(Arguments.begin(), CI.arg_begin(), CI.arg_end());
  FunctionType *FTy = CI.getCalledFunction()->getFunctionType();
  ParamTys.insert(ParamTys.begin(), FTy->param_begin(), FTy->param_end());
}

IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
                                                 ArrayRef<Type *> Tys,
                                                 FastMathFlags Flags,
                                                 const IntrinsicInst *I,
                                                 unsigned ScalarCost)
    : II(I), RetTy(RTy), IID(Id), FMF(Flags), ScalarizationCost(ScalarCost) {
  ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());
}

IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *Ty,
                                                 ArrayRef<const Value *> Args)
    : RetTy(Ty), IID(Id) {

  Arguments.insert(Arguments.begin(), Args.begin(), Args.end());
  ParamTys.reserve(Arguments.size());
  for (unsigned Idx = 0, Size = Arguments.size(); Idx != Size; ++Idx)
    ParamTys.push_back(Arguments[Idx]->getType());
}

IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
                                                 ArrayRef<const Value *> Args,
                                                 ArrayRef<Type *> Tys,
                                                 FastMathFlags Flags,
                                                 const IntrinsicInst *I,
                                                 unsigned ScalarCost)
    : II(I), RetTy(RTy), IID(Id), FMF(Flags), ScalarizationCost(ScalarCost) {
  ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());
  Arguments.insert(Arguments.begin(), Args.begin(), Args.end());
}

bool HardwareLoopInfo::isHardwareLoopCandidate(ScalarEvolution &SE,
                                               LoopInfo &LI, DominatorTree &DT,
                                               bool ForceNestedLoop,
                                               bool ForceHardwareLoopPHI) {
  SmallVector<BasicBlock *, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  for (BasicBlock *BB : ExitingBlocks) {
    // If we pass the updated counter back through a phi, we need to know
    // which latch the updated value will be coming from.
    if (!L->isLoopLatch(BB)) {
      if (ForceHardwareLoopPHI || CounterInReg)
        continue;
    }

    const SCEV *EC = SE.getExitCount(L, BB);
    if (isa<SCEVCouldNotCompute>(EC))
      continue;
    if (const SCEVConstant *ConstEC = dyn_cast<SCEVConstant>(EC)) {
      if (ConstEC->getValue()->isZero())
        continue;
    } else if (!SE.isLoopInvariant(EC, L))
      continue;

    if (SE.getTypeSizeInBits(EC->getType()) > CountType->getBitWidth())
      continue;

    // If this exiting block is contained in a nested loop, it is not eligible
    // for insertion of the branch-and-decrement since the inner loop would
    // end up messing up the value in the CTR.
    if (!IsNestingLegal && LI.getLoopFor(BB) != L && !ForceNestedLoop)
      continue;

    // We now have a loop-invariant count of loop iterations (which is not the
    // constant zero) for which we know that this loop will not exit via this
    // exiting block.

    // We need to make sure that this block will run on every loop iteration.
    // For this to be true, we must dominate all blocks with backedges. Such
    // blocks are in-loop predecessors to the header block.
    bool NotAlways = false;
    for (BasicBlock *Pred : predecessors(L->getHeader())) {
      if (!L->contains(Pred))
        continue;

      if (!DT.dominates(BB, Pred)) {
        NotAlways = true;
        break;
      }
    }

    if (NotAlways)
      continue;

    // Make sure this block ends with a conditional branch.
    Instruction *TI = BB->getTerminator();
    if (!TI)
      continue;

    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (!BI->isConditional())
        continue;

      ExitBranch = BI;
    } else
      continue;

    // Note that this block may not be the loop latch block, even if the loop
    // has a latch block.
    ExitBlock = BB;
    TripCount = SE.getAddExpr(EC, SE.getOne(EC->getType()));

    if (!EC->getType()->isPointerTy() && EC->getType() != CountType)
      TripCount = SE.getZeroExtendExpr(TripCount, CountType);
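
    // SCEV's exit count is the number of backedge-taken iterations, so the
    // trip count is one more than that. For example, for
    //   for (i = 0; i < 10; ++i) ...
    // the exit count is 9 and the TripCount computed above is 10, zero
    // extended to CountType when the types differ.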

    break;
  }

  if (!ExitBlock)
    return false;
  return true;
}

TargetTransformInfo::TargetTransformInfo(const DataLayout &DL)
    : TTIImpl(new Model<NoTTIImpl>(NoTTIImpl(DL))) {}

TargetTransformInfo::~TargetTransformInfo() {}

TargetTransformInfo::TargetTransformInfo(TargetTransformInfo &&Arg)
    : TTIImpl(std::move(Arg.TTIImpl)) {}

TargetTransformInfo &TargetTransformInfo::operator=(TargetTransformInfo &&RHS) {
  TTIImpl = std::move(RHS.TTIImpl);
  return *this;
}

unsigned TargetTransformInfo::getInliningThresholdMultiplier() const {
  return TTIImpl->getInliningThresholdMultiplier();
}

unsigned
TargetTransformInfo::adjustInliningThreshold(const CallBase *CB) const {
  return TTIImpl->adjustInliningThreshold(CB);
}

int TargetTransformInfo::getInlinerVectorBonusPercent() const {
  return TTIImpl->getInlinerVectorBonusPercent();
}

int TargetTransformInfo::getGEPCost(Type *PointeeType, const Value *Ptr,
                                    ArrayRef<const Value *> Operands,
                                    TTI::TargetCostKind CostKind) const {
  return TTIImpl->getGEPCost(PointeeType, Ptr, Operands, CostKind);
}

unsigned TargetTransformInfo::getEstimatedNumberOfCaseClusters(
    const SwitchInst &SI, unsigned &JTSize, ProfileSummaryInfo *PSI,
    BlockFrequencyInfo *BFI) const {
  return TTIImpl->getEstimatedNumberOfCaseClusters(SI, JTSize, PSI, BFI);
}

int TargetTransformInfo::getUserCost(const User *U,
                                     ArrayRef<const Value *> Operands,
                                     enum TargetCostKind CostKind) const {
  int Cost = TTIImpl->getUserCost(U, Operands, CostKind);
  assert((CostKind == TTI::TCK_RecipThroughput || Cost >= 0) &&
         "TTI should not produce negative costs!");
  return Cost;
}
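
// Callers typically query a single instruction against the cost kind they
// care about; a minimal sketch (hypothetical TTI reference and instruction):
//   int SizeCost = TTI.getUserCost(&I, TTI::TCK_CodeSize);
//   int TpCost = TTI.getUserCost(&I, TTI::TCK_RecipThroughput);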

BranchProbability TargetTransformInfo::getPredictableBranchThreshold() const {
  return TTIImpl->getPredictableBranchThreshold();
}

bool TargetTransformInfo::hasBranchDivergence() const {
  return TTIImpl->hasBranchDivergence();
}

bool TargetTransformInfo::useGPUDivergenceAnalysis() const {
  return TTIImpl->useGPUDivergenceAnalysis();
}

bool TargetTransformInfo::isSourceOfDivergence(const Value *V) const {
  return TTIImpl->isSourceOfDivergence(V);
}

bool llvm::TargetTransformInfo::isAlwaysUniform(const Value *V) const {
  return TTIImpl->isAlwaysUniform(V);
}

unsigned TargetTransformInfo::getFlatAddressSpace() const {
  return TTIImpl->getFlatAddressSpace();
}

bool TargetTransformInfo::collectFlatAddressOperands(
    SmallVectorImpl<int> &OpIndexes, Intrinsic::ID IID) const {
  return TTIImpl->collectFlatAddressOperands(OpIndexes, IID);
}

bool TargetTransformInfo::isNoopAddrSpaceCast(unsigned FromAS,
                                              unsigned ToAS) const {
  return TTIImpl->isNoopAddrSpaceCast(FromAS, ToAS);
}

unsigned TargetTransformInfo::getAssumedAddrSpace(const Value *V) const {
  return TTIImpl->getAssumedAddrSpace(V);
}

Value *TargetTransformInfo::rewriteIntrinsicWithAddressSpace(
    IntrinsicInst *II, Value *OldV, Value *NewV) const {
  return TTIImpl->rewriteIntrinsicWithAddressSpace(II, OldV, NewV);
}

bool TargetTransformInfo::isLoweredToCall(const Function *F) const {
  return TTIImpl->isLoweredToCall(F);
}

bool TargetTransformInfo::isHardwareLoopProfitable(
    Loop *L, ScalarEvolution &SE, AssumptionCache &AC,
    TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) const {
  return TTIImpl->isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);
}

bool TargetTransformInfo::preferPredicateOverEpilogue(
    Loop *L, LoopInfo *LI, ScalarEvolution &SE, AssumptionCache &AC,
    TargetLibraryInfo *TLI, DominatorTree *DT,
    const LoopAccessInfo *LAI) const {
  return TTIImpl->preferPredicateOverEpilogue(L, LI, SE, AC, TLI, DT, LAI);
}

bool TargetTransformInfo::emitGetActiveLaneMask() const {
  return TTIImpl->emitGetActiveLaneMask();
}

Optional<Instruction *>
TargetTransformInfo::instCombineIntrinsic(InstCombiner &IC,
                                          IntrinsicInst &II) const {
  return TTIImpl->instCombineIntrinsic(IC, II);
}

Optional<Value *> TargetTransformInfo::simplifyDemandedUseBitsIntrinsic(
    InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
    bool &KnownBitsComputed) const {
  return TTIImpl->simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
                                                   KnownBitsComputed);
}

Optional<Value *> TargetTransformInfo::simplifyDemandedVectorEltsIntrinsic(
    InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
    APInt &UndefElts2, APInt &UndefElts3,
    std::function<void(Instruction *, unsigned, APInt, APInt &)>
        SimplifyAndSetOp) const {
  return TTIImpl->simplifyDemandedVectorEltsIntrinsic(
      IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
      SimplifyAndSetOp);
}

void TargetTransformInfo::getUnrollingPreferences(
    Loop *L, ScalarEvolution &SE, UnrollingPreferences &UP) const {
  return TTIImpl->getUnrollingPreferences(L, SE, UP);
}

void TargetTransformInfo::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                                PeelingPreferences &PP) const {
  return TTIImpl->getPeelingPreferences(L, SE, PP);
}

bool TargetTransformInfo::isLegalAddImmediate(int64_t Imm) const {
  return TTIImpl->isLegalAddImmediate(Imm);
}

bool TargetTransformInfo::isLegalICmpImmediate(int64_t Imm) const {
  return TTIImpl->isLegalICmpImmediate(Imm);
}

bool TargetTransformInfo::isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                                                int64_t BaseOffset,
                                                bool HasBaseReg, int64_t Scale,
                                                unsigned AddrSpace,
                                                Instruction *I) const {
  return TTIImpl->isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
                                        Scale, AddrSpace, I);
}

bool TargetTransformInfo::isLSRCostLess(LSRCost &C1, LSRCost &C2) const {
  return TTIImpl->isLSRCostLess(C1, C2);
}

bool TargetTransformInfo::isNumRegsMajorCostOfLSR() const {
  return TTIImpl->isNumRegsMajorCostOfLSR();
}

bool TargetTransformInfo::isProfitableLSRChainElement(Instruction *I) const {
  return TTIImpl->isProfitableLSRChainElement(I);
}

bool TargetTransformInfo::canMacroFuseCmp() const {
  return TTIImpl->canMacroFuseCmp();
}

bool TargetTransformInfo::canSaveCmp(Loop *L, BranchInst **BI,
                                     ScalarEvolution *SE, LoopInfo *LI,
                                     DominatorTree *DT, AssumptionCache *AC,
                                     TargetLibraryInfo *LibInfo) const {
  return TTIImpl->canSaveCmp(L, BI, SE, LI, DT, AC, LibInfo);
}

TTI::AddressingModeKind
TargetTransformInfo::getPreferredAddressingMode(const Loop *L,
                                                ScalarEvolution *SE) const {
  return TTIImpl->getPreferredAddressingMode(L, SE);
}

bool TargetTransformInfo::isLegalMaskedStore(Type *DataType,
                                             Align Alignment) const {
  return TTIImpl->isLegalMaskedStore(DataType, Alignment);
}

bool TargetTransformInfo::isLegalMaskedLoad(Type *DataType,
                                            Align Alignment) const {
  return TTIImpl->isLegalMaskedLoad(DataType, Alignment);
}

bool TargetTransformInfo::isLegalNTStore(Type *DataType,
                                         Align Alignment) const {
  return TTIImpl->isLegalNTStore(DataType, Alignment);
}

bool TargetTransformInfo::isLegalNTLoad(Type *DataType, Align Alignment) const {
  return TTIImpl->isLegalNTLoad(DataType, Alignment);
}

bool TargetTransformInfo::isLegalMaskedGather(Type *DataType,
                                              Align Alignment) const {
  return TTIImpl->isLegalMaskedGather(DataType, Alignment);
}

bool TargetTransformInfo::isLegalMaskedScatter(Type *DataType,
                                               Align Alignment) const {
  return TTIImpl->isLegalMaskedScatter(DataType, Alignment);
}

bool TargetTransformInfo::isLegalMaskedCompressStore(Type *DataType) const {
  return TTIImpl->isLegalMaskedCompressStore(DataType);
}

bool TargetTransformInfo::isLegalMaskedExpandLoad(Type *DataType) const {
  return TTIImpl->isLegalMaskedExpandLoad(DataType);
}

bool TargetTransformInfo::hasDivRemOp(Type *DataType, bool IsSigned) const {
  return TTIImpl->hasDivRemOp(DataType, IsSigned);
}

bool TargetTransformInfo::hasVolatileVariant(Instruction *I,
                                             unsigned AddrSpace) const {
  return TTIImpl->hasVolatileVariant(I, AddrSpace);
}

bool TargetTransformInfo::prefersVectorizedAddressing() const {
  return TTIImpl->prefersVectorizedAddressing();
}

int TargetTransformInfo::getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                              int64_t BaseOffset,
                                              bool HasBaseReg, int64_t Scale,
                                              unsigned AddrSpace) const {
  int Cost = TTIImpl->getScalingFactorCost(Ty, BaseGV, BaseOffset, HasBaseReg,
                                           Scale, AddrSpace);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

bool TargetTransformInfo::LSRWithInstrQueries() const {
  return TTIImpl->LSRWithInstrQueries();
}

bool TargetTransformInfo::isTruncateFree(Type *Ty1, Type *Ty2) const {
  return TTIImpl->isTruncateFree(Ty1, Ty2);
}

bool TargetTransformInfo::isProfitableToHoist(Instruction *I) const {
  return TTIImpl->isProfitableToHoist(I);
}

bool TargetTransformInfo::useAA() const { return TTIImpl->useAA(); }

bool TargetTransformInfo::isTypeLegal(Type *Ty) const {
  return TTIImpl->isTypeLegal(Ty);
}

unsigned TargetTransformInfo::getRegUsageForType(Type *Ty) const {
  return TTIImpl->getRegUsageForType(Ty);
}

bool TargetTransformInfo::shouldBuildLookupTables() const {
  return TTIImpl->shouldBuildLookupTables();
}

bool TargetTransformInfo::shouldBuildLookupTablesForConstant(
    Constant *C) const {
  return TTIImpl->shouldBuildLookupTablesForConstant(C);
}

bool TargetTransformInfo::useColdCCForColdCall(Function &F) const {
  return TTIImpl->useColdCCForColdCall(F);
}

unsigned
TargetTransformInfo::getScalarizationOverhead(VectorType *Ty,
                                              const APInt &DemandedElts,
                                              bool Insert, bool Extract) const {
  return TTIImpl->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract);
}

unsigned TargetTransformInfo::getOperandsScalarizationOverhead(
    ArrayRef<const Value *> Args, ArrayRef<Type *> Tys) const {
  return TTIImpl->getOperandsScalarizationOverhead(Args, Tys);
}

bool TargetTransformInfo::supportsEfficientVectorElementLoadStore() const {
  return TTIImpl->supportsEfficientVectorElementLoadStore();
}

bool TargetTransformInfo::enableAggressiveInterleaving(
    bool LoopHasReductions) const {
  return TTIImpl->enableAggressiveInterleaving(LoopHasReductions);
}

TargetTransformInfo::MemCmpExpansionOptions
TargetTransformInfo::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  return TTIImpl->enableMemCmpExpansion(OptSize, IsZeroCmp);
}

bool TargetTransformInfo::enableInterleavedAccessVectorization() const {
  return TTIImpl->enableInterleavedAccessVectorization();
}

bool TargetTransformInfo::enableMaskedInterleavedAccessVectorization() const {
  return TTIImpl->enableMaskedInterleavedAccessVectorization();
}

bool TargetTransformInfo::isFPVectorizationPotentiallyUnsafe() const {
  return TTIImpl->isFPVectorizationPotentiallyUnsafe();
}

bool TargetTransformInfo::allowsMisalignedMemoryAccesses(LLVMContext &Context,
                                                         unsigned BitWidth,
                                                         unsigned AddressSpace,
                                                         Align Alignment,
                                                         bool *Fast) const {
  return TTIImpl->allowsMisalignedMemoryAccesses(Context, BitWidth,
                                                 AddressSpace, Alignment, Fast);
}

TargetTransformInfo::PopcntSupportKind
TargetTransformInfo::getPopcntSupport(unsigned IntTyWidthInBit) const {
  return TTIImpl->getPopcntSupport(IntTyWidthInBit);
}

bool TargetTransformInfo::haveFastSqrt(Type *Ty) const {
  return TTIImpl->haveFastSqrt(Ty);
}

bool TargetTransformInfo::isFCmpOrdCheaperThanFCmpZero(Type *Ty) const {
  return TTIImpl->isFCmpOrdCheaperThanFCmpZero(Ty);
}

int TargetTransformInfo::getFPOpCost(Type *Ty) const {
  int Cost = TTIImpl->getFPOpCost(Ty);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int TargetTransformInfo::getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
                                               const APInt &Imm,
                                               Type *Ty) const {
  int Cost = TTIImpl->getIntImmCodeSizeCost(Opcode, Idx, Imm, Ty);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int TargetTransformInfo::getIntImmCost(const APInt &Imm, Type *Ty,
                                       TTI::TargetCostKind CostKind) const {
  int Cost = TTIImpl->getIntImmCost(Imm, Ty, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int TargetTransformInfo::getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                           const APInt &Imm, Type *Ty,
                                           TTI::TargetCostKind CostKind,
                                           Instruction *Inst) const {
  int Cost = TTIImpl->getIntImmCostInst(Opcode, Idx, Imm, Ty, CostKind, Inst);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int
TargetTransformInfo::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                         const APInt &Imm, Type *Ty,
                                         TTI::TargetCostKind CostKind) const {
  int Cost = TTIImpl->getIntImmCostIntrin(IID, Idx, Imm, Ty, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

unsigned TargetTransformInfo::getNumberOfRegisters(unsigned ClassID) const {
  return TTIImpl->getNumberOfRegisters(ClassID);
}

unsigned TargetTransformInfo::getRegisterClassForType(bool Vector,
                                                      Type *Ty) const {
  return TTIImpl->getRegisterClassForType(Vector, Ty);
}

const char *TargetTransformInfo::getRegisterClassName(unsigned ClassID) const {
  return TTIImpl->getRegisterClassName(ClassID);
}

unsigned TargetTransformInfo::getRegisterBitWidth(bool Vector) const {
  return TTIImpl->getRegisterBitWidth(Vector);
}

unsigned TargetTransformInfo::getMinVectorRegisterBitWidth() const {
  return TTIImpl->getMinVectorRegisterBitWidth();
}

Optional<unsigned> TargetTransformInfo::getMaxVScale() const {
  return TTIImpl->getMaxVScale();
}

bool TargetTransformInfo::shouldMaximizeVectorBandwidth(bool OptSize) const {
  return TTIImpl->shouldMaximizeVectorBandwidth(OptSize);
}

ElementCount TargetTransformInfo::getMinimumVF(unsigned ElemWidth,
                                               bool IsScalable) const {
  return TTIImpl->getMinimumVF(ElemWidth, IsScalable);
}

unsigned TargetTransformInfo::getMaximumVF(unsigned ElemWidth,
                                           unsigned Opcode) const {
  return TTIImpl->getMaximumVF(ElemWidth, Opcode);
}

bool TargetTransformInfo::shouldConsiderAddressTypePromotion(
    const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const {
  return TTIImpl->shouldConsiderAddressTypePromotion(
      I, AllowPromotionWithoutCommonHeader);
}

unsigned TargetTransformInfo::getCacheLineSize() const {
  return TTIImpl->getCacheLineSize();
}

llvm::Optional<unsigned>
TargetTransformInfo::getCacheSize(CacheLevel Level) const {
  return TTIImpl->getCacheSize(Level);
}

llvm::Optional<unsigned>
TargetTransformInfo::getCacheAssociativity(CacheLevel Level) const {
  return TTIImpl->getCacheAssociativity(Level);
}

unsigned TargetTransformInfo::getPrefetchDistance() const {
  return TTIImpl->getPrefetchDistance();
}

unsigned TargetTransformInfo::getMinPrefetchStride(
    unsigned NumMemAccesses, unsigned NumStridedMemAccesses,
    unsigned NumPrefetches, bool HasCall) const {
  return TTIImpl->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
                                       NumPrefetches, HasCall);
}

unsigned TargetTransformInfo::getMaxPrefetchIterationsAhead() const {
  return TTIImpl->getMaxPrefetchIterationsAhead();
}

bool TargetTransformInfo::enableWritePrefetching() const {
  return TTIImpl->enableWritePrefetching();
}

unsigned TargetTransformInfo::getMaxInterleaveFactor(unsigned VF) const {
  return TTIImpl->getMaxInterleaveFactor(VF);
}

TargetTransformInfo::OperandValueKind
TargetTransformInfo::getOperandInfo(const Value *V,
                                    OperandValueProperties &OpProps) {
  OperandValueKind OpInfo = OK_AnyValue;
  OpProps = OP_None;

  if (const auto *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().isPowerOf2())
      OpProps = OP_PowerOf2;
    return OK_UniformConstantValue;
  }
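
  // Illustrative classifications (a sketch, not exhaustive):
  //   i32 8                                    -> OK_UniformConstantValue,
  //                                               OP_PowerOf2
  //   <4 x i32> <i32 4, i32 4, i32 4, i32 4>   -> OK_UniformConstantValue,
  //                                               OP_PowerOf2
  //   <4 x i32> <i32 1, i32 2, i32 4, i32 8>   -> OK_NonUniformConstantValue,
  //                                               OP_PowerOf2
  //   shufflevector %v, undef, zeroinitializer -> OK_UniformValue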

  // A broadcast shuffle creates a uniform value.
  // TODO: Add support for non-zero index broadcasts.
  // TODO: Add support for different source vector width.
  if (const auto *ShuffleInst = dyn_cast<ShuffleVectorInst>(V))
    if (ShuffleInst->isZeroEltSplat())
      OpInfo = OK_UniformValue;

  const Value *Splat = getSplatValue(V);

  // Check for a splat of a constant, or for a non-uniform vector of constants,
  // and check if the constant(s) are all powers of two.
  if (isa<ConstantVector>(V) || isa<ConstantDataVector>(V)) {
    OpInfo = OK_NonUniformConstantValue;
    if (Splat) {
      OpInfo = OK_UniformConstantValue;
      if (auto *CI = dyn_cast<ConstantInt>(Splat))
        if (CI->getValue().isPowerOf2())
          OpProps = OP_PowerOf2;
    } else if (const auto *CDS = dyn_cast<ConstantDataSequential>(V)) {
      OpProps = OP_PowerOf2;
      for (unsigned I = 0, E = CDS->getNumElements(); I != E; ++I) {
        if (auto *CI = dyn_cast<ConstantInt>(CDS->getElementAsConstant(I)))
          if (CI->getValue().isPowerOf2())
            continue;
        OpProps = OP_None;
        break;
      }
    }
  }

  // Check for a splat of a uniform value. This is not loop aware, so classify
  // as uniform only the obviously uniform cases (Argument, GlobalValue).
  if (Splat && (isa<Argument>(Splat) || isa<GlobalValue>(Splat)))
    OpInfo = OK_UniformValue;

  return OpInfo;
}

int TargetTransformInfo::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    OperandValueKind Opd1Info, OperandValueKind Opd2Info,
    OperandValueProperties Opd1PropInfo, OperandValueProperties Opd2PropInfo,
    ArrayRef<const Value *> Args, const Instruction *CxtI) const {
  int Cost = TTIImpl->getArithmeticInstrCost(
      Opcode, Ty, CostKind, Opd1Info, Opd2Info, Opd1PropInfo, Opd2PropInfo,
      Args, CxtI);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int TargetTransformInfo::getShuffleCost(ShuffleKind Kind, VectorType *Ty,
                                        ArrayRef<int> Mask, int Index,
                                        VectorType *SubTp) const {
  int Cost = TTIImpl->getShuffleCost(Kind, Ty, Mask, Index, SubTp);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

TTI::CastContextHint
TargetTransformInfo::getCastContextHint(const Instruction *I) {
  if (!I)
    return CastContextHint::None;

  auto getLoadStoreKind = [](const Value *V, unsigned LdStOp, unsigned MaskedOp,
                             unsigned GatScatOp) {
    const Instruction *I = dyn_cast<Instruction>(V);
    if (!I)
      return CastContextHint::None;

    if (I->getOpcode() == LdStOp)
      return CastContextHint::Normal;

    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      if (II->getIntrinsicID() == MaskedOp)
        return TTI::CastContextHint::Masked;
      if (II->getIntrinsicID() == GatScatOp)
        return TTI::CastContextHint::GatherScatter;
    }

    return TTI::CastContextHint::None;
  };
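
  // For example (illustrative IR), an extend fed by a plain load is a Normal
  // cast context, while one fed by a masked load is Masked:
  //   %l = load i32, i32* %p
  //   %z = zext i32 %l to i64              ; CastContextHint::Normal
  //   %m = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(...)
  //   %e = sext <4 x i32> %m to <4 x i64>  ; CastContextHint::Masked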

  switch (I->getOpcode()) {
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPExt:
    return getLoadStoreKind(I->getOperand(0), Instruction::Load,
                            Intrinsic::masked_load, Intrinsic::masked_gather);
  case Instruction::Trunc:
  case Instruction::FPTrunc:
    if (I->hasOneUse())
      return getLoadStoreKind(*I->user_begin(), Instruction::Store,
                              Intrinsic::masked_store,
                              Intrinsic::masked_scatter);
    break;
  default:
    return CastContextHint::None;
  }

  return TTI::CastContextHint::None;
}

int TargetTransformInfo::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                          CastContextHint CCH,
                                          TTI::TargetCostKind CostKind,
                                          const Instruction *I) const {
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  int Cost = TTIImpl->getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int TargetTransformInfo::getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                                  VectorType *VecTy,
                                                  unsigned Index) const {
  int Cost = TTIImpl->getExtractWithExtendCost(Opcode, Dst, VecTy, Index);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int TargetTransformInfo::getCFInstrCost(unsigned Opcode,
                                        TTI::TargetCostKind CostKind) const {
  int Cost = TTIImpl->getCFInstrCost(Opcode, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int TargetTransformInfo::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                            Type *CondTy,
                                            CmpInst::Predicate VecPred,
                                            TTI::TargetCostKind CostKind,
                                            const Instruction *I) const {
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  int Cost =
      TTIImpl->getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int TargetTransformInfo::getVectorInstrCost(unsigned Opcode, Type *Val,
                                            unsigned Index) const {
  int Cost = TTIImpl->getVectorInstrCost(Opcode, Val, Index);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int TargetTransformInfo::getMemoryOpCost(unsigned Opcode, Type *Src,
                                         Align Alignment, unsigned AddressSpace,
                                         TTI::TargetCostKind CostKind,
                                         const Instruction *I) const {
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  int Cost = TTIImpl->getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                      CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int TargetTransformInfo::getMaskedMemoryOpCost(
    unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
    TTI::TargetCostKind CostKind) const {
  int Cost = TTIImpl->getMaskedMemoryOpCost(Opcode, Src, Alignment,
                                            AddressSpace, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int TargetTransformInfo::getGatherScatterOpCost(
    unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
    Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) const {
  int Cost = TTIImpl->getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                             Alignment, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int TargetTransformInfo::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) const {
  int Cost = TTIImpl->getInterleavedMemoryOpCost(
      Opcode, VecTy, Factor, Indices, Alignment, AddressSpace, CostKind,
      UseMaskForCond, UseMaskForGaps);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int
TargetTransformInfo::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                           TTI::TargetCostKind CostKind) const {
  int Cost = TTIImpl->getIntrinsicInstrCost(ICA, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int TargetTransformInfo::getCallInstrCost(Function *F, Type *RetTy,
                                          ArrayRef<Type *> Tys,
                                          TTI::TargetCostKind CostKind) const {
  int Cost = TTIImpl->getCallInstrCost(F, RetTy, Tys, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

unsigned TargetTransformInfo::getNumberOfParts(Type *Tp) const {
  return TTIImpl->getNumberOfParts(Tp);
}

int TargetTransformInfo::getAddressComputationCost(Type *Tp,
                                                   ScalarEvolution *SE,
                                                   const SCEV *Ptr) const {
  int Cost = TTIImpl->getAddressComputationCost(Tp, SE, Ptr);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int TargetTransformInfo::getMemcpyCost(const Instruction *I) const {
  int Cost = TTIImpl->getMemcpyCost(I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int TargetTransformInfo::getArithmeticReductionCost(
    unsigned Opcode, VectorType *Ty, bool IsPairwiseForm,
    TTI::TargetCostKind CostKind) const {
  int Cost = TTIImpl->getArithmeticReductionCost(Opcode, Ty, IsPairwiseForm,
                                                 CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int TargetTransformInfo::getMinMaxReductionCost(
    VectorType *Ty, VectorType *CondTy, bool IsPairwiseForm, bool IsUnsigned,
    TTI::TargetCostKind CostKind) const {
  int Cost = TTIImpl->getMinMaxReductionCost(Ty, CondTy, IsPairwiseForm,
                                             IsUnsigned, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getExtendedAddReductionCost(
    bool IsMLA, bool IsUnsigned, Type *ResTy, VectorType *Ty,
    TTI::TargetCostKind CostKind) const {
  return TTIImpl->getExtendedAddReductionCost(IsMLA, IsUnsigned, ResTy, Ty,
                                              CostKind);
}

unsigned
TargetTransformInfo::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const {
  return TTIImpl->getCostOfKeepingLiveOverCall(Tys);
}

bool TargetTransformInfo::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                             MemIntrinsicInfo &Info) const {
  return TTIImpl->getTgtMemIntrinsic(Inst, Info);
}

unsigned TargetTransformInfo::getAtomicMemIntrinsicMaxElementSize() const {
  return TTIImpl->getAtomicMemIntrinsicMaxElementSize();
}

Value *TargetTransformInfo::getOrCreateResultFromMemIntrinsic(
    IntrinsicInst *Inst, Type *ExpectedType) const {
  return TTIImpl->getOrCreateResultFromMemIntrinsic(Inst, ExpectedType);
}

Type *TargetTransformInfo::getMemcpyLoopLoweringType(
    LLVMContext &Context, Value *Length, unsigned SrcAddrSpace,
    unsigned DestAddrSpace, unsigned SrcAlign, unsigned DestAlign) const {
  return TTIImpl->getMemcpyLoopLoweringType(Context, Length, SrcAddrSpace,
                                            DestAddrSpace, SrcAlign, DestAlign);
}

void TargetTransformInfo::getMemcpyLoopResidualLoweringType(
    SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
    unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
    unsigned SrcAlign, unsigned DestAlign) const {
  TTIImpl->getMemcpyLoopResidualLoweringType(OpsOut, Context, RemainingBytes,
                                             SrcAddrSpace, DestAddrSpace,
                                             SrcAlign, DestAlign);
}

bool TargetTransformInfo::areInlineCompatible(const Function *Caller,
                                              const Function *Callee) const {
  return TTIImpl->areInlineCompatible(Caller, Callee);
}

bool TargetTransformInfo::areFunctionArgsABICompatible(
    const Function *Caller, const Function *Callee,
    SmallPtrSetImpl<Argument *> &Args) const {
  return TTIImpl->areFunctionArgsABICompatible(Caller, Callee, Args);
}

bool TargetTransformInfo::isIndexedLoadLegal(MemIndexedMode Mode,
                                             Type *Ty) const {
  return TTIImpl->isIndexedLoadLegal(Mode, Ty);
}

bool TargetTransformInfo::isIndexedStoreLegal(MemIndexedMode Mode,
                                              Type *Ty) const {
  return TTIImpl->isIndexedStoreLegal(Mode, Ty);
}

unsigned TargetTransformInfo::getLoadStoreVecRegBitWidth(unsigned AS) const {
  return TTIImpl->getLoadStoreVecRegBitWidth(AS);
}

bool TargetTransformInfo::isLegalToVectorizeLoad(LoadInst *LI) const {
  return TTIImpl->isLegalToVectorizeLoad(LI);
}

bool TargetTransformInfo::isLegalToVectorizeStore(StoreInst *SI) const {
  return TTIImpl->isLegalToVectorizeStore(SI);
}

bool TargetTransformInfo::isLegalToVectorizeLoadChain(
    unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const {
  return TTIImpl->isLegalToVectorizeLoadChain(ChainSizeInBytes, Alignment,
                                              AddrSpace);
}

bool TargetTransformInfo::isLegalToVectorizeStoreChain(
    unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const {
  return TTIImpl->isLegalToVectorizeStoreChain(ChainSizeInBytes, Alignment,
                                               AddrSpace);
}

bool TargetTransformInfo::isLegalToVectorizeReduction(
    RecurrenceDescriptor RdxDesc, ElementCount VF) const {
  return TTIImpl->isLegalToVectorizeReduction(RdxDesc, VF);
}

unsigned TargetTransformInfo::getLoadVectorFactor(unsigned VF,
                                                  unsigned LoadSize,
                                                  unsigned ChainSizeInBytes,
                                                  VectorType *VecTy) const {
  return TTIImpl->getLoadVectorFactor(VF, LoadSize, ChainSizeInBytes, VecTy);
}

unsigned TargetTransformInfo::getStoreVectorFactor(unsigned VF,
                                                   unsigned StoreSize,
                                                   unsigned ChainSizeInBytes,
                                                   VectorType *VecTy) const {
  return TTIImpl->getStoreVectorFactor(VF, StoreSize, ChainSizeInBytes, VecTy);
}

bool TargetTransformInfo::preferInLoopReduction(unsigned Opcode, Type *Ty,
                                                ReductionFlags Flags) const {
  return TTIImpl->preferInLoopReduction(Opcode, Ty, Flags);
}

bool TargetTransformInfo::preferPredicatedReductionSelect(
    unsigned Opcode, Type *Ty, ReductionFlags Flags) const {
  return TTIImpl->preferPredicatedReductionSelect(Opcode, Ty, Flags);
}

bool TargetTransformInfo::shouldExpandReduction(const IntrinsicInst *II) const {
  return TTIImpl->shouldExpandReduction(II);
}

unsigned TargetTransformInfo::getGISelRematGlobalCost() const {
  return TTIImpl->getGISelRematGlobalCost();
}

bool TargetTransformInfo::supportsScalableVectors() const {
  return TTIImpl->supportsScalableVectors();
}

int TargetTransformInfo::getInstructionLatency(const Instruction *I) const {
  return TTIImpl->getInstructionLatency(I);
}

static bool matchPairwiseShuffleMask(ShuffleVectorInst *SI, bool IsLeft,
                                     unsigned Level) {
  // We don't need a shuffle if we just want to have element 0 in position 0 of
  // the vector.
  if (!SI && Level == 0 && IsLeft)
    return true;
  if (!SI)
    return false;

  SmallVector<int, 32> Mask(
      cast<FixedVectorType>(SI->getType())->getNumElements(), -1);

  // Build a mask of 0, 2, ... (left) or 1, 3, ... (right) depending on whether
  // we look at the left or right side.
  for (unsigned i = 0, e = (1 << Level), val = !IsLeft; i != e; ++i, val += 2)
    Mask[i] = val;
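
  // For example, with 4-element vectors: Level 0 expects <0,-1,-1,-1> (left)
  // or <1,-1,-1,-1> (right); Level 1 expects <0,2,-1,-1> (left) or
  // <1,3,-1,-1> (right), where -1 stands for undef.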

  ArrayRef<int> ActualMask = SI->getShuffleMask();
  return Mask == ActualMask;
}

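// Match a single reduction step: either a plain binary operator, or a
// select-of-compare implementing a min/max. For example (illustrative IR), an
// smin step is recognized as RK_MinMax:
//   %c = icmp slt <4 x i32> %a, %b
//   %m = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %b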
static Optional<TTI::ReductionData> getReductionData(Instruction *I) {
  Value *L, *R;
  if (m_BinOp(m_Value(L), m_Value(R)).match(I))
    return TTI::ReductionData(TTI::RK_Arithmetic, I->getOpcode(), L, R);
  if (auto *SI = dyn_cast<SelectInst>(I)) {
    if (m_SMin(m_Value(L), m_Value(R)).match(SI) ||
        m_SMax(m_Value(L), m_Value(R)).match(SI) ||
        m_OrdFMin(m_Value(L), m_Value(R)).match(SI) ||
        m_OrdFMax(m_Value(L), m_Value(R)).match(SI) ||
        m_UnordFMin(m_Value(L), m_Value(R)).match(SI) ||
        m_UnordFMax(m_Value(L), m_Value(R)).match(SI)) {
      auto *CI = cast<CmpInst>(SI->getCondition());
      return TTI::ReductionData(TTI::RK_MinMax, CI->getOpcode(), L, R);
    }
    if (m_UMin(m_Value(L), m_Value(R)).match(SI) ||
        m_UMax(m_Value(L), m_Value(R)).match(SI)) {
      auto *CI = cast<CmpInst>(SI->getCondition());
      return TTI::ReductionData(TTI::RK_UnsignedMinMax, CI->getOpcode(), L, R);
    }
  }
  return llvm::None;
}

static TTI::ReductionKind matchPairwiseReductionAtLevel(Instruction *I,
                                                        unsigned Level,
                                                        unsigned NumLevels) {
  // Match one level of pairwise operations.
  // %rdx.shuf.0.0 = shufflevector <4 x float> %rdx, <4 x float> undef,
  //       <4 x i32> <i32 0, i32 2 , i32 undef, i32 undef>
  // %rdx.shuf.0.1 = shufflevector <4 x float> %rdx, <4 x float> undef,
  //       <4 x i32> <i32 1, i32 3, i32 undef, i32 undef>
  // %bin.rdx.0 = fadd <4 x float> %rdx.shuf.0.0, %rdx.shuf.0.1
  if (!I)
    return TTI::RK_None;

  assert(I->getType()->isVectorTy() && "Expecting a vector type");

  Optional<TTI::ReductionData> RD = getReductionData(I);
  if (!RD)
    return TTI::RK_None;

  ShuffleVectorInst *LS = dyn_cast<ShuffleVectorInst>(RD->LHS);
  if (!LS && Level)
    return TTI::RK_None;
  ShuffleVectorInst *RS = dyn_cast<ShuffleVectorInst>(RD->RHS);
  if (!RS && Level)
    return TTI::RK_None;

  // On level 0 we can omit one shufflevector instruction.
  if (!Level && !RS && !LS)
    return TTI::RK_None;

  // Shuffle inputs must match.
  Value *NextLevelOpL = LS ? LS->getOperand(0) : nullptr;
  Value *NextLevelOpR = RS ? RS->getOperand(0) : nullptr;
  Value *NextLevelOp = nullptr;
  if (NextLevelOpR && NextLevelOpL) {
    // If we have two shuffles their operands must match.
    if (NextLevelOpL != NextLevelOpR)
      return TTI::RK_None;

    NextLevelOp = NextLevelOpL;
  } else if (Level == 0 && (NextLevelOpR || NextLevelOpL)) {
    // On the first level we can omit the shufflevector <0, undef,...>. So the
    // input to the other shufflevector <1, undef> must match with one of the
    // inputs to the current binary operation.
    // Example:
    //  %NextLevelOpL = shufflevector %R, <1, undef ...>
    //  %BinOp        = fadd          %NextLevelOpL, %R
    if (NextLevelOpL && NextLevelOpL != RD->RHS)
      return TTI::RK_None;
    if (NextLevelOpR && NextLevelOpR != RD->LHS)
      return TTI::RK_None;

    NextLevelOp = NextLevelOpL ? RD->RHS : RD->LHS;
  } else
    return TTI::RK_None;

  // Check that the next level's binary operation exists and matches with the
  // current one.
  if (Level + 1 != NumLevels) {
    if (!isa<Instruction>(NextLevelOp))
      return TTI::RK_None;
    Optional<TTI::ReductionData> NextLevelRD =
        getReductionData(cast<Instruction>(NextLevelOp));
    if (!NextLevelRD || !RD->hasSameData(*NextLevelRD))
      return TTI::RK_None;
  }

  // Shuffle mask for pairwise operation must match.
  if (matchPairwiseShuffleMask(LS, /*IsLeft=*/true, Level)) {
    if (!matchPairwiseShuffleMask(RS, /*IsLeft=*/false, Level))
      return TTI::RK_None;
  } else if (matchPairwiseShuffleMask(RS, /*IsLeft=*/true, Level)) {
    if (!matchPairwiseShuffleMask(LS, /*IsLeft=*/false, Level))
      return TTI::RK_None;
  } else {
    return TTI::RK_None;
  }

  if (++Level == NumLevels)
    return RD->Kind;

  // Match next level.
  return matchPairwiseReductionAtLevel(dyn_cast<Instruction>(NextLevelOp),
                                       Level, NumLevels);
}

TTI::ReductionKind TTI::matchPairwiseReduction(
    const ExtractElementInst *ReduxRoot, unsigned &Opcode, VectorType *&Ty) {
  if (!EnableReduxCost)
    return TTI::RK_None;

  // Need to extract the first element.
  ConstantInt *CI = dyn_cast<ConstantInt>(ReduxRoot->getOperand(1));
  unsigned Idx = ~0u;
  if (CI)
    Idx = CI->getZExtValue();
  if (Idx != 0)
    return TTI::RK_None;

  auto *RdxStart = dyn_cast<Instruction>(ReduxRoot->getOperand(0));
  if (!RdxStart)
    return TTI::RK_None;
  Optional<TTI::ReductionData> RD = getReductionData(RdxStart);
  if (!RD)
    return TTI::RK_None;

  auto *VecTy = cast<FixedVectorType>(RdxStart->getType());
  unsigned NumVecElems = VecTy->getNumElements();
  if (!isPowerOf2_32(NumVecElems))
    return TTI::RK_None;

  // We look for a sequence of shuffle,shuffle,add triples like the following
  // that builds a pairwise reduction tree.
  //
  //  (X0, X1, X2, X3)
  //   (X0 + X1, X2 + X3, undef, undef)
  //    ((X0 + X1) + (X2 + X3), undef, undef, undef)
  //
  // %rdx.shuf.0.0 = shufflevector <4 x float> %rdx, <4 x float> undef,
  //       <4 x i32> <i32 0, i32 2 , i32 undef, i32 undef>
  // %rdx.shuf.0.1 = shufflevector <4 x float> %rdx, <4 x float> undef,
  //       <4 x i32> <i32 1, i32 3, i32 undef, i32 undef>
  // %bin.rdx.0 = fadd <4 x float> %rdx.shuf.0.0, %rdx.shuf.0.1
  // %rdx.shuf.1.0 = shufflevector <4 x float> %bin.rdx.0, <4 x float> undef,
  //       <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
  // %rdx.shuf.1.1 = shufflevector <4 x float> %bin.rdx.0, <4 x float> undef,
  //       <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
  // %bin.rdx8 = fadd <4 x float> %rdx.shuf.1.0, %rdx.shuf.1.1
  // %r = extractelement <4 x float> %bin.rdx8, i32 0
  if (matchPairwiseReductionAtLevel(RdxStart, 0, Log2_32(NumVecElems)) ==
      TTI::RK_None)
    return TTI::RK_None;

  Opcode = RD->Opcode;
  Ty = VecTy;

  return RD->Kind;
}

static std::pair<Value *, ShuffleVectorInst *>
getShuffleAndOtherOprd(Value *L, Value *R) {
  ShuffleVectorInst *S = nullptr;

  if ((S = dyn_cast<ShuffleVectorInst>(L)))
    return std::make_pair(R, S);

  S = dyn_cast<ShuffleVectorInst>(R);
  return std::make_pair(L, S);
}

TTI::ReductionKind TTI::matchVectorSplittingReduction(
    const ExtractElementInst *ReduxRoot, unsigned &Opcode, VectorType *&Ty) {
  if (!EnableReduxCost)
    return TTI::RK_None;

  // Need to extract the first element.
  ConstantInt *CI = dyn_cast<ConstantInt>(ReduxRoot->getOperand(1));
  unsigned Idx = ~0u;
  if (CI)
    Idx = CI->getZExtValue();
  if (Idx != 0)
    return TTI::RK_None;

  auto *RdxStart = dyn_cast<Instruction>(ReduxRoot->getOperand(0));
  if (!RdxStart)
    return TTI::RK_None;
  Optional<TTI::ReductionData> RD = getReductionData(RdxStart);
  if (!RD)
    return TTI::RK_None;

  auto *VecTy = cast<FixedVectorType>(ReduxRoot->getOperand(0)->getType());
  unsigned NumVecElems = VecTy->getNumElements();
  if (!isPowerOf2_32(NumVecElems))
    return TTI::RK_None;

  // We look for a sequence of shuffles and adds like the following, matching
  // one fadd/shufflevector pair at a time.
  //
  // %rdx.shuf = shufflevector <4 x float> %rdx, <4 x float> undef,
  //                           <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
  // %bin.rdx = fadd <4 x float> %rdx, %rdx.shuf
  // %rdx.shuf7 = shufflevector <4 x float> %bin.rdx, <4 x float> undef,
  //                          <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
  // %bin.rdx8 = fadd <4 x float> %bin.rdx, %rdx.shuf7
  // %r = extractelement <4 x float> %bin.rdx8, i32 0

  unsigned MaskStart = 1;
  Instruction *RdxOp = RdxStart;
  SmallVector<int, 32> ShuffleMask(NumVecElems, 0);
  unsigned NumVecElemsRemain = NumVecElems;
  while (NumVecElemsRemain - 1) {
    // Check for the right reduction operation.
    if (!RdxOp)
      return TTI::RK_None;
    Optional<TTI::ReductionData> RDLevel = getReductionData(RdxOp);
    if (!RDLevel || !RDLevel->hasSameData(*RD))
      return TTI::RK_None;

    Value *NextRdxOp;
    ShuffleVectorInst *Shuffle;
    std::tie(NextRdxOp, Shuffle) =
        getShuffleAndOtherOprd(RDLevel->LHS, RDLevel->RHS);

    // Check that the current reduction operation and the shuffle use the same
    // value.
    if (Shuffle == nullptr)
      return TTI::RK_None;
    if (Shuffle->getOperand(0) != NextRdxOp)
      return TTI::RK_None;

    // Check that the shuffle mask matches.
    for (unsigned j = 0; j != MaskStart; ++j)
      ShuffleMask[j] = MaskStart + j;
    // Fill the rest of the mask with -1 for undef.
    std::fill(&ShuffleMask[MaskStart], ShuffleMask.end(), -1);

    ArrayRef<int> Mask = Shuffle->getShuffleMask();
    if (ShuffleMask != Mask)
      return TTI::RK_None;

    RdxOp = dyn_cast<Instruction>(NextRdxOp);
    NumVecElemsRemain /= 2;
    MaskStart *= 2;
  }

  Opcode = RD->Opcode;
  Ty = VecTy;
  return RD->Kind;
}

TTI::ReductionKind
TTI::matchVectorReduction(const ExtractElementInst *Root, unsigned &Opcode,
                          VectorType *&Ty, bool &IsPairwise) {
  TTI::ReductionKind RdxKind = matchVectorSplittingReduction(Root, Opcode, Ty);
  if (RdxKind != TTI::ReductionKind::RK_None) {
    IsPairwise = false;
    return RdxKind;
  }
  IsPairwise = true;
  return matchPairwiseReduction(Root, Opcode, Ty);
}
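
// A sketch of how a caller might drive the matcher above (hypothetical
// variables; Root is the final extractelement of the reduction):
//   unsigned RdxOpcode;
//   VectorType *RdxTy;
//   bool IsPairwise;
//   if (TTI::matchVectorReduction(Root, RdxOpcode, RdxTy, IsPairwise) !=
//       TTI::RK_None)
//     ... // cost it, e.g. via getArithmeticReductionCost(RdxOpcode, RdxTy,
//         // IsPairwise, CostKind)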

int TargetTransformInfo::getInstructionThroughput(const Instruction *I) const {
  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;

  switch (I->getOpcode()) {
  case Instruction::GetElementPtr:
  case Instruction::Ret:
  case Instruction::PHI:
  case Instruction::Br:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::FNeg:
  case Instruction::Select:
  case Instruction::ICmp:
  case Instruction::FCmp:
  case Instruction::Store:
  case Instruction::Load:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
  case Instruction::ExtractElement:
  case Instruction::InsertElement:
  case Instruction::ExtractValue:
  case Instruction::ShuffleVector:
  case Instruction::Call:
    return getUserCost(I, CostKind);
  default:
    // We don't have any information on this instruction.
    return -1;
  }
}

TargetTransformInfo::Concept::~Concept() {}

TargetIRAnalysis::TargetIRAnalysis() : TTICallback(&getDefaultTTI) {}

TargetIRAnalysis::TargetIRAnalysis(
    std::function<Result(const Function &)> TTICallback)
    : TTICallback(std::move(TTICallback)) {}

TargetIRAnalysis::Result TargetIRAnalysis::run(const Function &F,
                                               FunctionAnalysisManager &) {
  return TTICallback(F);
}

AnalysisKey TargetIRAnalysis::Key;

TargetIRAnalysis::Result TargetIRAnalysis::getDefaultTTI(const Function &F) {
  return Result(F.getParent()->getDataLayout());
}
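
// In a new-PM pass, TargetTransformInfo is usually obtained through this
// analysis; a minimal sketch (hypothetical pass body over Function &F):
//   TargetTransformInfo &TTI = AM.getResult<TargetIRAnalysis>(F);
//   if (TTI.isLegalMaskedLoad(VecTy, Align(16)))
//     ...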

// Register the basic pass.
INITIALIZE_PASS(TargetTransformInfoWrapperPass, "tti",
                "Target Transform Information", false, true)
char TargetTransformInfoWrapperPass::ID = 0;

void TargetTransformInfoWrapperPass::anchor() {}

TargetTransformInfoWrapperPass::TargetTransformInfoWrapperPass()
    : ImmutablePass(ID) {
  initializeTargetTransformInfoWrapperPassPass(
      *PassRegistry::getPassRegistry());
}

TargetTransformInfoWrapperPass::TargetTransformInfoWrapperPass(
    TargetIRAnalysis TIRA)
    : ImmutablePass(ID), TIRA(std::move(TIRA)) {
  initializeTargetTransformInfoWrapperPassPass(
      *PassRegistry::getPassRegistry());
}

TargetTransformInfo &TargetTransformInfoWrapperPass::getTTI(const Function &F) {
  FunctionAnalysisManager DummyFAM;
  TTI = TIRA.run(F, DummyFAM);
  return *TTI;
}

ImmutablePass *
llvm::createTargetTransformInfoWrapperPass(TargetIRAnalysis TIRA) {
  return new TargetTransformInfoWrapperPass(std::move(TIRA));
}