1 //===- llvm/Analysis/TargetTransformInfo.cpp ------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "llvm/Analysis/TargetTransformInfo.h"
10 #include "llvm/Analysis/CFG.h"
11 #include "llvm/Analysis/LoopIterator.h"
12 #include "llvm/Analysis/TargetTransformInfoImpl.h"
13 #include "llvm/IR/CFG.h"
14 #include "llvm/IR/Dominators.h"
15 #include "llvm/IR/Instruction.h"
16 #include "llvm/IR/Instructions.h"
17 #include "llvm/IR/IntrinsicInst.h"
18 #include "llvm/IR/Module.h"
19 #include "llvm/IR/Operator.h"
20 #include "llvm/IR/PatternMatch.h"
21 #include "llvm/InitializePasses.h"
22 #include "llvm/Support/CommandLine.h"
23 #include <utility>
24 
25 using namespace llvm;
26 using namespace PatternMatch;
27 
28 #define DEBUG_TYPE "tti"
29 
30 static cl::opt<bool> EnableReduxCost("costmodel-reduxcost", cl::init(false),
31                                      cl::Hidden,
32                                      cl::desc("Recognize reduction patterns."));
33 
34 static cl::opt<unsigned> CacheLineSize(
35     "cache-line-size", cl::init(0), cl::Hidden,
36     cl::desc("Use this to override the target cache line size when "
37              "specified by the user."));
38 
namespace {
/// No-op implementation of the TTI interface using the utility base
/// classes.
///
/// This is used when no target specific information is available.
struct NoTTIImpl : TargetTransformInfoImplCRTPBase<NoTTIImpl> {
  // Only the DataLayout is needed; every query falls back to the CRTP base's
  // conservative defaults.
  explicit NoTTIImpl(const DataLayout &DL)
      : TargetTransformInfoImplCRTPBase<NoTTIImpl>(DL) {}
};
} // namespace
49 
50 bool HardwareLoopInfo::canAnalyze(LoopInfo &LI) {
51   // If the loop has irreducible control flow, it can not be converted to
52   // Hardware loop.
53   LoopBlocksRPO RPOT(L);
54   RPOT.perform(&LI);
55   if (containsIrreducibleCFG<const BasicBlock *>(RPOT, LI))
56     return false;
57   return true;
58 }
59 
// Build cost attributes directly from an intrinsic call site: the return
// type, arguments, fast-math flags, and parameter types are all taken from
// the call itself.
IntrinsicCostAttributes::IntrinsicCostAttributes(
    Intrinsic::ID Id, const CallBase &CI, InstructionCost ScalarizationCost)
    : II(dyn_cast<IntrinsicInst>(&CI)), RetTy(CI.getType()), IID(Id),
      ScalarizationCost(ScalarizationCost) {

  // Propagate fast-math flags when the call is an FP operation.
  if (const auto *FPMO = dyn_cast<FPMathOperator>(&CI))
    FMF = FPMO->getFastMathFlags();

  Arguments.insert(Arguments.begin(), CI.arg_begin(), CI.arg_end());
  // NOTE(review): assumes a direct callee — getCalledFunction() returns null
  // for indirect calls; confirm callers only pass direct intrinsic calls.
  FunctionType *FTy = CI.getCalledFunction()->getFunctionType();
  ParamTys.insert(ParamTys.begin(), FTy->param_begin(), FTy->param_end());
}
72 
73 IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
74                                                  ArrayRef<Type *> Tys,
75                                                  FastMathFlags Flags,
76                                                  const IntrinsicInst *I,
77                                                  InstructionCost ScalarCost)
78     : II(I), RetTy(RTy), IID(Id), FMF(Flags), ScalarizationCost(ScalarCost) {
79   ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());
80 }
81 
82 IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *Ty,
83                                                  ArrayRef<const Value *> Args)
84     : RetTy(Ty), IID(Id) {
85 
86   Arguments.insert(Arguments.begin(), Args.begin(), Args.end());
87   ParamTys.reserve(Arguments.size());
88   for (unsigned Idx = 0, Size = Arguments.size(); Idx != Size; ++Idx)
89     ParamTys.push_back(Arguments[Idx]->getType());
90 }
91 
92 IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
93                                                  ArrayRef<const Value *> Args,
94                                                  ArrayRef<Type *> Tys,
95                                                  FastMathFlags Flags,
96                                                  const IntrinsicInst *I,
97                                                  InstructionCost ScalarCost)
98     : II(I), RetTy(RTy), IID(Id), FMF(Flags), ScalarizationCost(ScalarCost) {
99   ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());
100   Arguments.insert(Arguments.begin(), Args.begin(), Args.end());
101 }
102 
// Decide whether this loop can be converted to a hardware loop: search for an
// exiting block whose exit count is computable, loop-invariant, non-zero,
// narrow enough for the counter register, and which executes on every
// iteration and ends in a conditional branch.  On success, ExitBlock,
// ExitBranch and ExitCount are populated and true is returned.
bool HardwareLoopInfo::isHardwareLoopCandidate(ScalarEvolution &SE,
                                               LoopInfo &LI, DominatorTree &DT,
                                               bool ForceNestedLoop,
                                               bool ForceHardwareLoopPHI) {
  SmallVector<BasicBlock *, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  for (BasicBlock *BB : ExitingBlocks) {
    // If we pass the updated counter back through a phi, we need to know
    // which latch the updated value will be coming from.
    if (!L->isLoopLatch(BB)) {
      if (ForceHardwareLoopPHI || CounterInReg)
        continue;
    }

    // An exit count SCEV cannot compute is useless here.
    const SCEV *EC = SE.getExitCount(L, BB);
    if (isa<SCEVCouldNotCompute>(EC))
      continue;
    if (const SCEVConstant *ConstEC = dyn_cast<SCEVConstant>(EC)) {
      if (ConstEC->getValue()->isZero())
        continue;
    } else if (!SE.isLoopInvariant(EC, L))
      continue;

    // The counter type must be wide enough to hold the exit count.
    if (SE.getTypeSizeInBits(EC->getType()) > CountType->getBitWidth())
      continue;

    // If this exiting block is contained in a nested loop, it is not eligible
    // for insertion of the branch-and-decrement since the inner loop would
    // end up messing up the value in the CTR.
    if (!IsNestingLegal && LI.getLoopFor(BB) != L && !ForceNestedLoop)
      continue;

    // We now have a loop-invariant count of loop iterations (which is not the
    // constant zero) for which we know that this loop will not exit via this
    // existing block.

    // We need to make sure that this block will run on every loop iteration.
    // For this to be true, we must dominate all blocks with backedges. Such
    // blocks are in-loop predecessors to the header block.
    bool NotAlways = false;
    for (BasicBlock *Pred : predecessors(L->getHeader())) {
      if (!L->contains(Pred))
        continue;

      if (!DT.dominates(BB, Pred)) {
        NotAlways = true;
        break;
      }
    }

    if (NotAlways)
      continue;

    // Make sure this blocks ends with a conditional branch.
    // NOTE(review): getTerminator() is non-null for well-formed blocks; the
    // null check below is belt-and-braces.
    Instruction *TI = BB->getTerminator();
    if (!TI)
      continue;

    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (!BI->isConditional())
        continue;

      ExitBranch = BI;
    } else
      continue;

    // Note that this block may not be the loop latch block, even if the loop
    // has a latch block.
    ExitBlock = BB;
    ExitCount = EC;
    break;
  }

  if (!ExitBlock)
    return false;
  return true;
}
181 
// Default construction wraps the no-op implementation in the type-erased
// Model; used when no target-specific TTI is registered.
TargetTransformInfo::TargetTransformInfo(const DataLayout &DL)
    : TTIImpl(new Model<NoTTIImpl>(NoTTIImpl(DL))) {}

TargetTransformInfo::~TargetTransformInfo() = default;

// Move operations transfer ownership of the type-erased implementation.
TargetTransformInfo::TargetTransformInfo(TargetTransformInfo &&Arg)
    : TTIImpl(std::move(Arg.TTIImpl)) {}

TargetTransformInfo &TargetTransformInfo::operator=(TargetTransformInfo &&RHS) {
  TTIImpl = std::move(RHS.TTIImpl);
  return *this;
}
194 
// Inlining-threshold and generic cost queries.  All forward to the target
// implementation.
unsigned TargetTransformInfo::getInliningThresholdMultiplier() const {
  return TTIImpl->getInliningThresholdMultiplier();
}

unsigned
TargetTransformInfo::adjustInliningThreshold(const CallBase *CB) const {
  return TTIImpl->adjustInliningThreshold(CB);
}

int TargetTransformInfo::getInlinerVectorBonusPercent() const {
  return TTIImpl->getInlinerVectorBonusPercent();
}

InstructionCost
TargetTransformInfo::getGEPCost(Type *PointeeType, const Value *Ptr,
                                ArrayRef<const Value *> Operands,
                                TTI::TargetCostKind CostKind) const {
  return TTIImpl->getGEPCost(PointeeType, Ptr, Operands, CostKind);
}

unsigned TargetTransformInfo::getEstimatedNumberOfCaseClusters(
    const SwitchInst &SI, unsigned &JTSize, ProfileSummaryInfo *PSI,
    BlockFrequencyInfo *BFI) const {
  return TTIImpl->getEstimatedNumberOfCaseClusters(SI, JTSize, PSI, BFI);
}

InstructionCost
TargetTransformInfo::getUserCost(const User *U,
                                 ArrayRef<const Value *> Operands,
                                 enum TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getUserCost(U, Operands, CostKind);
  // Only the reciprocal-throughput cost kind may report a negative value;
  // all other kinds must be non-negative.
  assert((CostKind == TTI::TCK_RecipThroughput || Cost >= 0) &&
         "TTI should not produce negative costs!");
  return Cost;
}
230 
// Branch-divergence, address-space, and hardware-loop/predication queries.
// All are thin forwards to the target implementation.
BranchProbability TargetTransformInfo::getPredictableBranchThreshold() const {
  return TTIImpl->getPredictableBranchThreshold();
}

bool TargetTransformInfo::hasBranchDivergence() const {
  return TTIImpl->hasBranchDivergence();
}

bool TargetTransformInfo::useGPUDivergenceAnalysis() const {
  return TTIImpl->useGPUDivergenceAnalysis();
}

bool TargetTransformInfo::isSourceOfDivergence(const Value *V) const {
  return TTIImpl->isSourceOfDivergence(V);
}

bool llvm::TargetTransformInfo::isAlwaysUniform(const Value *V) const {
  return TTIImpl->isAlwaysUniform(V);
}

unsigned TargetTransformInfo::getFlatAddressSpace() const {
  return TTIImpl->getFlatAddressSpace();
}

bool TargetTransformInfo::collectFlatAddressOperands(
    SmallVectorImpl<int> &OpIndexes, Intrinsic::ID IID) const {
  return TTIImpl->collectFlatAddressOperands(OpIndexes, IID);
}

bool TargetTransformInfo::isNoopAddrSpaceCast(unsigned FromAS,
                                              unsigned ToAS) const {
  return TTIImpl->isNoopAddrSpaceCast(FromAS, ToAS);
}

bool TargetTransformInfo::canHaveNonUndefGlobalInitializerInAddressSpace(
    unsigned AS) const {
  return TTIImpl->canHaveNonUndefGlobalInitializerInAddressSpace(AS);
}

unsigned TargetTransformInfo::getAssumedAddrSpace(const Value *V) const {
  return TTIImpl->getAssumedAddrSpace(V);
}

std::pair<const Value *, unsigned>
TargetTransformInfo::getPredicatedAddrSpace(const Value *V) const {
  return TTIImpl->getPredicatedAddrSpace(V);
}

Value *TargetTransformInfo::rewriteIntrinsicWithAddressSpace(
    IntrinsicInst *II, Value *OldV, Value *NewV) const {
  return TTIImpl->rewriteIntrinsicWithAddressSpace(II, OldV, NewV);
}

bool TargetTransformInfo::isLoweredToCall(const Function *F) const {
  return TTIImpl->isLoweredToCall(F);
}

bool TargetTransformInfo::isHardwareLoopProfitable(
    Loop *L, ScalarEvolution &SE, AssumptionCache &AC,
    TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) const {
  return TTIImpl->isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);
}

bool TargetTransformInfo::preferPredicateOverEpilogue(
    Loop *L, LoopInfo *LI, ScalarEvolution &SE, AssumptionCache &AC,
    TargetLibraryInfo *TLI, DominatorTree *DT,
    const LoopAccessInfo *LAI) const {
  return TTIImpl->preferPredicateOverEpilogue(L, LI, SE, AC, TLI, DT, LAI);
}

bool TargetTransformInfo::emitGetActiveLaneMask() const {
  return TTIImpl->emitGetActiveLaneMask();
}
303 }
304 
// Target hooks for InstCombine simplification of target intrinsics, plus
// loop unrolling/peeling preference queries.
Optional<Instruction *>
TargetTransformInfo::instCombineIntrinsic(InstCombiner &IC,
                                          IntrinsicInst &II) const {
  return TTIImpl->instCombineIntrinsic(IC, II);
}

Optional<Value *> TargetTransformInfo::simplifyDemandedUseBitsIntrinsic(
    InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
    bool &KnownBitsComputed) const {
  return TTIImpl->simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
                                                   KnownBitsComputed);
}

Optional<Value *> TargetTransformInfo::simplifyDemandedVectorEltsIntrinsic(
    InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
    APInt &UndefElts2, APInt &UndefElts3,
    std::function<void(Instruction *, unsigned, APInt, APInt &)>
        SimplifyAndSetOp) const {
  return TTIImpl->simplifyDemandedVectorEltsIntrinsic(
      IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
      SimplifyAndSetOp);
}

void TargetTransformInfo::getUnrollingPreferences(
    Loop *L, ScalarEvolution &SE, UnrollingPreferences &UP,
    OptimizationRemarkEmitter *ORE) const {
  return TTIImpl->getUnrollingPreferences(L, SE, UP, ORE);
}

void TargetTransformInfo::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                                PeelingPreferences &PP) const {
  return TTIImpl->getPeelingPreferences(L, SE, PP);
}
338 
// Legality queries: immediates, addressing modes, LSR hooks, and masked /
// non-temporal / gather-scatter memory operation support.  All forward to
// the target implementation.
bool TargetTransformInfo::isLegalAddImmediate(int64_t Imm) const {
  return TTIImpl->isLegalAddImmediate(Imm);
}

bool TargetTransformInfo::isLegalICmpImmediate(int64_t Imm) const {
  return TTIImpl->isLegalICmpImmediate(Imm);
}

bool TargetTransformInfo::isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                                                int64_t BaseOffset,
                                                bool HasBaseReg, int64_t Scale,
                                                unsigned AddrSpace,
                                                Instruction *I) const {
  return TTIImpl->isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
                                        Scale, AddrSpace, I);
}

bool TargetTransformInfo::isLSRCostLess(const LSRCost &C1,
                                        const LSRCost &C2) const {
  return TTIImpl->isLSRCostLess(C1, C2);
}

bool TargetTransformInfo::isNumRegsMajorCostOfLSR() const {
  return TTIImpl->isNumRegsMajorCostOfLSR();
}

bool TargetTransformInfo::isProfitableLSRChainElement(Instruction *I) const {
  return TTIImpl->isProfitableLSRChainElement(I);
}

bool TargetTransformInfo::canMacroFuseCmp() const {
  return TTIImpl->canMacroFuseCmp();
}

bool TargetTransformInfo::canSaveCmp(Loop *L, BranchInst **BI,
                                     ScalarEvolution *SE, LoopInfo *LI,
                                     DominatorTree *DT, AssumptionCache *AC,
                                     TargetLibraryInfo *LibInfo) const {
  return TTIImpl->canSaveCmp(L, BI, SE, LI, DT, AC, LibInfo);
}

TTI::AddressingModeKind
TargetTransformInfo::getPreferredAddressingMode(const Loop *L,
                                                ScalarEvolution *SE) const {
  return TTIImpl->getPreferredAddressingMode(L, SE);
}

bool TargetTransformInfo::isLegalMaskedStore(Type *DataType,
                                             Align Alignment) const {
  return TTIImpl->isLegalMaskedStore(DataType, Alignment);
}

bool TargetTransformInfo::isLegalMaskedLoad(Type *DataType,
                                            Align Alignment) const {
  return TTIImpl->isLegalMaskedLoad(DataType, Alignment);
}

bool TargetTransformInfo::isLegalNTStore(Type *DataType,
                                         Align Alignment) const {
  return TTIImpl->isLegalNTStore(DataType, Alignment);
}

bool TargetTransformInfo::isLegalNTLoad(Type *DataType, Align Alignment) const {
  return TTIImpl->isLegalNTLoad(DataType, Alignment);
}

bool TargetTransformInfo::isLegalBroadcastLoad(Type *ElementTy,
                                               ElementCount NumElements) const {
  return TTIImpl->isLegalBroadcastLoad(ElementTy, NumElements);
}

bool TargetTransformInfo::isLegalMaskedGather(Type *DataType,
                                              Align Alignment) const {
  return TTIImpl->isLegalMaskedGather(DataType, Alignment);
}

bool TargetTransformInfo::isLegalMaskedScatter(Type *DataType,
                                               Align Alignment) const {
  return TTIImpl->isLegalMaskedScatter(DataType, Alignment);
}

bool TargetTransformInfo::forceScalarizeMaskedGather(VectorType *DataType,
                                                     Align Alignment) const {
  return TTIImpl->forceScalarizeMaskedGather(DataType, Alignment);
}

bool TargetTransformInfo::forceScalarizeMaskedScatter(VectorType *DataType,
                                                      Align Alignment) const {
  return TTIImpl->forceScalarizeMaskedScatter(DataType, Alignment);
}

bool TargetTransformInfo::isLegalMaskedCompressStore(Type *DataType) const {
  return TTIImpl->isLegalMaskedCompressStore(DataType);
}

bool TargetTransformInfo::isLegalMaskedExpandLoad(Type *DataType) const {
  return TTIImpl->isLegalMaskedExpandLoad(DataType);
}

bool TargetTransformInfo::enableOrderedReductions() const {
  return TTIImpl->enableOrderedReductions();
}

bool TargetTransformInfo::hasDivRemOp(Type *DataType, bool IsSigned) const {
  return TTIImpl->hasDivRemOp(DataType, IsSigned);
}

bool TargetTransformInfo::hasVolatileVariant(Instruction *I,
                                             unsigned AddrSpace) const {
  return TTIImpl->hasVolatileVariant(I, AddrSpace);
}

bool TargetTransformInfo::prefersVectorizedAddressing() const {
  return TTIImpl->prefersVectorizedAddressing();
}
454 
// Scaling-factor cost plus miscellaneous target-capability queries.
InstructionCost TargetTransformInfo::getScalingFactorCost(
    Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg,
    int64_t Scale, unsigned AddrSpace) const {
  InstructionCost Cost = TTIImpl->getScalingFactorCost(
      Ty, BaseGV, BaseOffset, HasBaseReg, Scale, AddrSpace);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

bool TargetTransformInfo::LSRWithInstrQueries() const {
  return TTIImpl->LSRWithInstrQueries();
}

bool TargetTransformInfo::isTruncateFree(Type *Ty1, Type *Ty2) const {
  return TTIImpl->isTruncateFree(Ty1, Ty2);
}

bool TargetTransformInfo::isProfitableToHoist(Instruction *I) const {
  return TTIImpl->isProfitableToHoist(I);
}

bool TargetTransformInfo::useAA() const { return TTIImpl->useAA(); }

bool TargetTransformInfo::isTypeLegal(Type *Ty) const {
  return TTIImpl->isTypeLegal(Ty);
}

unsigned TargetTransformInfo::getRegUsageForType(Type *Ty) const {
  return TTIImpl->getRegUsageForType(Ty);
}

bool TargetTransformInfo::shouldBuildLookupTables() const {
  return TTIImpl->shouldBuildLookupTables();
}

bool TargetTransformInfo::shouldBuildLookupTablesForConstant(
    Constant *C) const {
  return TTIImpl->shouldBuildLookupTablesForConstant(C);
}

bool TargetTransformInfo::shouldBuildRelLookupTables() const {
  return TTIImpl->shouldBuildRelLookupTables();
}

bool TargetTransformInfo::useColdCCForColdCall(Function &F) const {
  return TTIImpl->useColdCCForColdCall(F);
}
502 
// Scalarization-overhead and vectorization-capability queries.
InstructionCost
TargetTransformInfo::getScalarizationOverhead(VectorType *Ty,
                                              const APInt &DemandedElts,
                                              bool Insert, bool Extract) const {
  return TTIImpl->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract);
}

InstructionCost TargetTransformInfo::getOperandsScalarizationOverhead(
    ArrayRef<const Value *> Args, ArrayRef<Type *> Tys) const {
  return TTIImpl->getOperandsScalarizationOverhead(Args, Tys);
}

bool TargetTransformInfo::supportsEfficientVectorElementLoadStore() const {
  return TTIImpl->supportsEfficientVectorElementLoadStore();
}

bool TargetTransformInfo::enableAggressiveInterleaving(
    bool LoopHasReductions) const {
  return TTIImpl->enableAggressiveInterleaving(LoopHasReductions);
}

TargetTransformInfo::MemCmpExpansionOptions
TargetTransformInfo::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  return TTIImpl->enableMemCmpExpansion(OptSize, IsZeroCmp);
}

bool TargetTransformInfo::enableInterleavedAccessVectorization() const {
  return TTIImpl->enableInterleavedAccessVectorization();
}

bool TargetTransformInfo::enableMaskedInterleavedAccessVectorization() const {
  return TTIImpl->enableMaskedInterleavedAccessVectorization();
}

bool TargetTransformInfo::isFPVectorizationPotentiallyUnsafe() const {
  return TTIImpl->isFPVectorizationPotentiallyUnsafe();
}

bool TargetTransformInfo::allowsMisalignedMemoryAccesses(LLVMContext &Context,
                                                         unsigned BitWidth,
                                                         unsigned AddressSpace,
                                                         Align Alignment,
                                                         bool *Fast) const {
  return TTIImpl->allowsMisalignedMemoryAccesses(Context, BitWidth,
                                                 AddressSpace, Alignment, Fast);
}

TargetTransformInfo::PopcntSupportKind
TargetTransformInfo::getPopcntSupport(unsigned IntTyWidthInBit) const {
  return TTIImpl->getPopcntSupport(IntTyWidthInBit);
}
554 
// FP-capability and immediate-cost queries.  Cost-returning wrappers assert
// that the target never reports a negative cost.
bool TargetTransformInfo::haveFastSqrt(Type *Ty) const {
  return TTIImpl->haveFastSqrt(Ty);
}

bool TargetTransformInfo::isFCmpOrdCheaperThanFCmpZero(Type *Ty) const {
  return TTIImpl->isFCmpOrdCheaperThanFCmpZero(Ty);
}

InstructionCost TargetTransformInfo::getFPOpCost(Type *Ty) const {
  InstructionCost Cost = TTIImpl->getFPOpCost(Ty);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getIntImmCodeSizeCost(unsigned Opcode,
                                                           unsigned Idx,
                                                           const APInt &Imm,
                                                           Type *Ty) const {
  InstructionCost Cost = TTIImpl->getIntImmCodeSizeCost(Opcode, Idx, Imm, Ty);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost
TargetTransformInfo::getIntImmCost(const APInt &Imm, Type *Ty,
                                   TTI::TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getIntImmCost(Imm, Ty, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getIntImmCostInst(
    unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty,
    TTI::TargetCostKind CostKind, Instruction *Inst) const {
  InstructionCost Cost =
      TTIImpl->getIntImmCostInst(Opcode, Idx, Imm, Ty, CostKind, Inst);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost
TargetTransformInfo::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                         const APInt &Imm, Type *Ty,
                                         TTI::TargetCostKind CostKind) const {
  InstructionCost Cost =
      TTIImpl->getIntImmCostIntrin(IID, Idx, Imm, Ty, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}
604 
// Register-file, vector-width, cache, and prefetch characteristics.
unsigned TargetTransformInfo::getNumberOfRegisters(unsigned ClassID) const {
  return TTIImpl->getNumberOfRegisters(ClassID);
}

unsigned TargetTransformInfo::getRegisterClassForType(bool Vector,
                                                      Type *Ty) const {
  return TTIImpl->getRegisterClassForType(Vector, Ty);
}

const char *TargetTransformInfo::getRegisterClassName(unsigned ClassID) const {
  return TTIImpl->getRegisterClassName(ClassID);
}

TypeSize TargetTransformInfo::getRegisterBitWidth(
    TargetTransformInfo::RegisterKind K) const {
  return TTIImpl->getRegisterBitWidth(K);
}

unsigned TargetTransformInfo::getMinVectorRegisterBitWidth() const {
  return TTIImpl->getMinVectorRegisterBitWidth();
}

Optional<unsigned> TargetTransformInfo::getMaxVScale() const {
  return TTIImpl->getMaxVScale();
}

Optional<unsigned> TargetTransformInfo::getVScaleForTuning() const {
  return TTIImpl->getVScaleForTuning();
}

bool TargetTransformInfo::shouldMaximizeVectorBandwidth(
    TargetTransformInfo::RegisterKind K) const {
  return TTIImpl->shouldMaximizeVectorBandwidth(K);
}

ElementCount TargetTransformInfo::getMinimumVF(unsigned ElemWidth,
                                               bool IsScalable) const {
  return TTIImpl->getMinimumVF(ElemWidth, IsScalable);
}

unsigned TargetTransformInfo::getMaximumVF(unsigned ElemWidth,
                                           unsigned Opcode) const {
  return TTIImpl->getMaximumVF(ElemWidth, Opcode);
}

unsigned TargetTransformInfo::getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
                                                Type *ScalarValTy) const {
  return TTIImpl->getStoreMinimumVF(VF, ScalarMemTy, ScalarValTy);
}

bool TargetTransformInfo::shouldConsiderAddressTypePromotion(
    const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const {
  return TTIImpl->shouldConsiderAddressTypePromotion(
      I, AllowPromotionWithoutCommonHeader);
}

// The -cache-line-size command-line flag, when given, overrides the
// target-reported cache line size.
unsigned TargetTransformInfo::getCacheLineSize() const {
  return CacheLineSize.getNumOccurrences() > 0 ? CacheLineSize
                                               : TTIImpl->getCacheLineSize();
}

llvm::Optional<unsigned>
TargetTransformInfo::getCacheSize(CacheLevel Level) const {
  return TTIImpl->getCacheSize(Level);
}

llvm::Optional<unsigned>
TargetTransformInfo::getCacheAssociativity(CacheLevel Level) const {
  return TTIImpl->getCacheAssociativity(Level);
}

unsigned TargetTransformInfo::getPrefetchDistance() const {
  return TTIImpl->getPrefetchDistance();
}

unsigned TargetTransformInfo::getMinPrefetchStride(
    unsigned NumMemAccesses, unsigned NumStridedMemAccesses,
    unsigned NumPrefetches, bool HasCall) const {
  return TTIImpl->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
                                       NumPrefetches, HasCall);
}

unsigned TargetTransformInfo::getMaxPrefetchIterationsAhead() const {
  return TTIImpl->getMaxPrefetchIterationsAhead();
}

bool TargetTransformInfo::enableWritePrefetching() const {
  return TTIImpl->enableWritePrefetching();
}

unsigned TargetTransformInfo::getMaxInterleaveFactor(unsigned VF) const {
  return TTIImpl->getMaxInterleaveFactor(VF);
}
698 
// Classify operand V for operand-aware cost modeling: uniform vs. non-uniform,
// constant vs. variable, with the power-of-two property reported via OpProps.
TargetTransformInfo::OperandValueKind
TargetTransformInfo::getOperandInfo(const Value *V,
                                    OperandValueProperties &OpProps) {
  OperandValueKind OpInfo = OK_AnyValue;
  OpProps = OP_None;

  // A scalar ConstantInt is trivially a uniform constant.
  if (const auto *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().isPowerOf2())
      OpProps = OP_PowerOf2;
    return OK_UniformConstantValue;
  }

  // A broadcast shuffle creates a uniform value.
  // TODO: Add support for non-zero index broadcasts.
  // TODO: Add support for different source vector width.
  if (const auto *ShuffleInst = dyn_cast<ShuffleVectorInst>(V))
    if (ShuffleInst->isZeroEltSplat())
      OpInfo = OK_UniformValue;

  const Value *Splat = getSplatValue(V);

  // Check for a splat of a constant or for a non uniform vector of constants
  // and check if the constant(s) are all powers of two.
  if (isa<ConstantVector>(V) || isa<ConstantDataVector>(V)) {
    OpInfo = OK_NonUniformConstantValue;
    if (Splat) {
      OpInfo = OK_UniformConstantValue;
      if (auto *CI = dyn_cast<ConstantInt>(Splat))
        if (CI->getValue().isPowerOf2())
          OpProps = OP_PowerOf2;
    } else if (const auto *CDS = dyn_cast<ConstantDataSequential>(V)) {
      // Start optimistic; clear the property on the first non-power-of-two
      // (or non-integer) element.
      OpProps = OP_PowerOf2;
      for (unsigned I = 0, E = CDS->getNumElements(); I != E; ++I) {
        if (auto *CI = dyn_cast<ConstantInt>(CDS->getElementAsConstant(I)))
          if (CI->getValue().isPowerOf2())
            continue;
        OpProps = OP_None;
        break;
      }
    }
  }

  // Check for a splat of a uniform value. This is not loop aware, so return
  // true only for the obviously uniform cases (argument, globalvalue)
  if (Splat && (isa<Argument>(Splat) || isa<GlobalValue>(Splat)))
    OpInfo = OK_UniformValue;

  return OpInfo;
}
748 
// Arithmetic and shuffle cost queries; both assert the target reports a
// non-negative cost.
InstructionCost TargetTransformInfo::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    OperandValueKind Opd1Info, OperandValueKind Opd2Info,
    OperandValueProperties Opd1PropInfo, OperandValueProperties Opd2PropInfo,
    ArrayRef<const Value *> Args, const Instruction *CxtI) const {
  InstructionCost Cost =
      TTIImpl->getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info, Opd2Info,
                                      Opd1PropInfo, Opd2PropInfo, Args, CxtI);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getShuffleCost(
    ShuffleKind Kind, VectorType *Ty, ArrayRef<int> Mask, int Index,
    VectorType *SubTp, ArrayRef<const Value *> Args) const {
  InstructionCost Cost =
      TTIImpl->getShuffleCost(Kind, Ty, Mask, Index, SubTp, Args);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}
769 
// Derive the context in which a cast instruction appears (feeding from a
// load or into a store, possibly masked or gather/scatter) so targets can
// cost extending loads and truncating stores appropriately.
TTI::CastContextHint
TargetTransformInfo::getCastContextHint(const Instruction *I) {
  if (!I)
    return CastContextHint::None;

  // Classify V as a plain load/store, a masked variant, or a gather/scatter,
  // given the plain opcode and the two intrinsic IDs to look for.
  auto getLoadStoreKind = [](const Value *V, unsigned LdStOp, unsigned MaskedOp,
                             unsigned GatScatOp) {
    const Instruction *I = dyn_cast<Instruction>(V);
    if (!I)
      return CastContextHint::None;

    if (I->getOpcode() == LdStOp)
      return CastContextHint::Normal;

    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      if (II->getIntrinsicID() == MaskedOp)
        return TTI::CastContextHint::Masked;
      if (II->getIntrinsicID() == GatScatOp)
        return TTI::CastContextHint::GatherScatter;
    }

    return TTI::CastContextHint::None;
  };

  switch (I->getOpcode()) {
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPExt:
    // Extends are contextualized by the memory operation they read from.
    return getLoadStoreKind(I->getOperand(0), Instruction::Load,
                            Intrinsic::masked_load, Intrinsic::masked_gather);
  case Instruction::Trunc:
  case Instruction::FPTrunc:
    // Truncates are contextualized by their (sole) consuming store.
    if (I->hasOneUse())
      return getLoadStoreKind(*I->user_begin(), Instruction::Store,
                              Intrinsic::masked_store,
                              Intrinsic::masked_scatter);
    break;
  default:
    return CastContextHint::None;
  }

  return TTI::CastContextHint::None;
}
813 
814 InstructionCost TargetTransformInfo::getCastInstrCost(
815     unsigned Opcode, Type *Dst, Type *Src, CastContextHint CCH,
816     TTI::TargetCostKind CostKind, const Instruction *I) const {
817   assert((I == nullptr || I->getOpcode() == Opcode) &&
818          "Opcode should reflect passed instruction.");
819   InstructionCost Cost =
820       TTIImpl->getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
821   assert(Cost >= 0 && "TTI should not produce negative costs!");
822   return Cost;
823 }
824 
825 InstructionCost TargetTransformInfo::getExtractWithExtendCost(
826     unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index) const {
827   InstructionCost Cost =
828       TTIImpl->getExtractWithExtendCost(Opcode, Dst, VecTy, Index);
829   assert(Cost >= 0 && "TTI should not produce negative costs!");
830   return Cost;
831 }
832 
833 InstructionCost TargetTransformInfo::getCFInstrCost(
834     unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I) const {
835   assert((I == nullptr || I->getOpcode() == Opcode) &&
836          "Opcode should reflect passed instruction.");
837   InstructionCost Cost = TTIImpl->getCFInstrCost(Opcode, CostKind, I);
838   assert(Cost >= 0 && "TTI should not produce negative costs!");
839   return Cost;
840 }
841 
842 InstructionCost TargetTransformInfo::getCmpSelInstrCost(
843     unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
844     TTI::TargetCostKind CostKind, const Instruction *I) const {
845   assert((I == nullptr || I->getOpcode() == Opcode) &&
846          "Opcode should reflect passed instruction.");
847   InstructionCost Cost =
848       TTIImpl->getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
849   assert(Cost >= 0 && "TTI should not produce negative costs!");
850   return Cost;
851 }
852 
853 InstructionCost TargetTransformInfo::getVectorInstrCost(unsigned Opcode,
854                                                         Type *Val,
855                                                         unsigned Index) const {
856   InstructionCost Cost = TTIImpl->getVectorInstrCost(Opcode, Val, Index);
857   assert(Cost >= 0 && "TTI should not produce negative costs!");
858   return Cost;
859 }
860 
861 InstructionCost TargetTransformInfo::getReplicationShuffleCost(
862     Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts,
863     TTI::TargetCostKind CostKind) {
864   InstructionCost Cost = TTIImpl->getReplicationShuffleCost(
865       EltTy, ReplicationFactor, VF, DemandedDstElts, CostKind);
866   assert(Cost >= 0 && "TTI should not produce negative costs!");
867   return Cost;
868 }
869 
870 InstructionCost TargetTransformInfo::getMemoryOpCost(
871     unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
872     TTI::TargetCostKind CostKind, const Instruction *I) const {
873   assert((I == nullptr || I->getOpcode() == Opcode) &&
874          "Opcode should reflect passed instruction.");
875   InstructionCost Cost = TTIImpl->getMemoryOpCost(Opcode, Src, Alignment,
876                                                   AddressSpace, CostKind, I);
877   assert(Cost >= 0 && "TTI should not produce negative costs!");
878   return Cost;
879 }
880 
881 InstructionCost TargetTransformInfo::getMaskedMemoryOpCost(
882     unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
883     TTI::TargetCostKind CostKind) const {
884   InstructionCost Cost = TTIImpl->getMaskedMemoryOpCost(Opcode, Src, Alignment,
885                                                         AddressSpace, CostKind);
886   assert(Cost >= 0 && "TTI should not produce negative costs!");
887   return Cost;
888 }
889 
890 InstructionCost TargetTransformInfo::getGatherScatterOpCost(
891     unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
892     Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) const {
893   InstructionCost Cost = TTIImpl->getGatherScatterOpCost(
894       Opcode, DataTy, Ptr, VariableMask, Alignment, CostKind, I);
895   assert(Cost >= 0 && "TTI should not produce negative costs!");
896   return Cost;
897 }
898 
899 InstructionCost TargetTransformInfo::getInterleavedMemoryOpCost(
900     unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
901     Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
902     bool UseMaskForCond, bool UseMaskForGaps) const {
903   InstructionCost Cost = TTIImpl->getInterleavedMemoryOpCost(
904       Opcode, VecTy, Factor, Indices, Alignment, AddressSpace, CostKind,
905       UseMaskForCond, UseMaskForGaps);
906   assert(Cost >= 0 && "TTI should not produce negative costs!");
907   return Cost;
908 }
909 
910 InstructionCost
911 TargetTransformInfo::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
912                                            TTI::TargetCostKind CostKind) const {
913   InstructionCost Cost = TTIImpl->getIntrinsicInstrCost(ICA, CostKind);
914   assert(Cost >= 0 && "TTI should not produce negative costs!");
915   return Cost;
916 }
917 
918 InstructionCost
919 TargetTransformInfo::getCallInstrCost(Function *F, Type *RetTy,
920                                       ArrayRef<Type *> Tys,
921                                       TTI::TargetCostKind CostKind) const {
922   InstructionCost Cost = TTIImpl->getCallInstrCost(F, RetTy, Tys, CostKind);
923   assert(Cost >= 0 && "TTI should not produce negative costs!");
924   return Cost;
925 }
926 
// Thin wrapper: forwards directly to the target-specific implementation.
unsigned TargetTransformInfo::getNumberOfParts(Type *Tp) const {
  return TTIImpl->getNumberOfParts(Tp);
}
930 
931 InstructionCost
932 TargetTransformInfo::getAddressComputationCost(Type *Tp, ScalarEvolution *SE,
933                                                const SCEV *Ptr) const {
934   InstructionCost Cost = TTIImpl->getAddressComputationCost(Tp, SE, Ptr);
935   assert(Cost >= 0 && "TTI should not produce negative costs!");
936   return Cost;
937 }
938 
939 InstructionCost TargetTransformInfo::getMemcpyCost(const Instruction *I) const {
940   InstructionCost Cost = TTIImpl->getMemcpyCost(I);
941   assert(Cost >= 0 && "TTI should not produce negative costs!");
942   return Cost;
943 }
944 
945 InstructionCost TargetTransformInfo::getArithmeticReductionCost(
946     unsigned Opcode, VectorType *Ty, Optional<FastMathFlags> FMF,
947     TTI::TargetCostKind CostKind) const {
948   InstructionCost Cost =
949       TTIImpl->getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);
950   assert(Cost >= 0 && "TTI should not produce negative costs!");
951   return Cost;
952 }
953 
954 InstructionCost TargetTransformInfo::getMinMaxReductionCost(
955     VectorType *Ty, VectorType *CondTy, bool IsUnsigned,
956     TTI::TargetCostKind CostKind) const {
957   InstructionCost Cost =
958       TTIImpl->getMinMaxReductionCost(Ty, CondTy, IsUnsigned, CostKind);
959   assert(Cost >= 0 && "TTI should not produce negative costs!");
960   return Cost;
961 }
962 
963 InstructionCost TargetTransformInfo::getExtendedAddReductionCost(
964     bool IsMLA, bool IsUnsigned, Type *ResTy, VectorType *Ty,
965     TTI::TargetCostKind CostKind) const {
966   return TTIImpl->getExtendedAddReductionCost(IsMLA, IsUnsigned, ResTy, Ty,
967                                               CostKind);
968 }
969 
// Thin wrapper: forwards directly to the target-specific implementation.
InstructionCost
TargetTransformInfo::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const {
  return TTIImpl->getCostOfKeepingLiveOverCall(Tys);
}
974 
// Thin wrapper: forwards to the target, which fills \p Info on success.
bool TargetTransformInfo::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                             MemIntrinsicInfo &Info) const {
  return TTIImpl->getTgtMemIntrinsic(Inst, Info);
}
979 
// Thin wrapper: forwards directly to the target-specific implementation.
unsigned TargetTransformInfo::getAtomicMemIntrinsicMaxElementSize() const {
  return TTIImpl->getAtomicMemIntrinsicMaxElementSize();
}
983 
// Thin wrapper: forwards directly to the target-specific implementation.
Value *TargetTransformInfo::getOrCreateResultFromMemIntrinsic(
    IntrinsicInst *Inst, Type *ExpectedType) const {
  return TTIImpl->getOrCreateResultFromMemIntrinsic(Inst, ExpectedType);
}
988 
// Thin wrapper: forwards directly to the target-specific implementation.
Type *TargetTransformInfo::getMemcpyLoopLoweringType(
    LLVMContext &Context, Value *Length, unsigned SrcAddrSpace,
    unsigned DestAddrSpace, unsigned SrcAlign, unsigned DestAlign,
    Optional<uint32_t> AtomicElementSize) const {
  return TTIImpl->getMemcpyLoopLoweringType(Context, Length, SrcAddrSpace,
                                            DestAddrSpace, SrcAlign, DestAlign,
                                            AtomicElementSize);
}
997 
// Thin wrapper: forwards to the target, which appends results to \p OpsOut.
void TargetTransformInfo::getMemcpyLoopResidualLoweringType(
    SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
    unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
    unsigned SrcAlign, unsigned DestAlign,
    Optional<uint32_t> AtomicCpySize) const {
  TTIImpl->getMemcpyLoopResidualLoweringType(
      OpsOut, Context, RemainingBytes, SrcAddrSpace, DestAddrSpace, SrcAlign,
      DestAlign, AtomicCpySize);
}
1007 
// Thin wrapper: forwards directly to the target-specific implementation.
bool TargetTransformInfo::areInlineCompatible(const Function *Caller,
                                              const Function *Callee) const {
  return TTIImpl->areInlineCompatible(Caller, Callee);
}
1012 
// Thin wrapper: forwards directly to the target-specific implementation.
bool TargetTransformInfo::areTypesABICompatible(
    const Function *Caller, const Function *Callee,
    const ArrayRef<Type *> &Types) const {
  return TTIImpl->areTypesABICompatible(Caller, Callee, Types);
}
1018 
// Thin wrapper: forwards directly to the target-specific implementation.
bool TargetTransformInfo::isIndexedLoadLegal(MemIndexedMode Mode,
                                             Type *Ty) const {
  return TTIImpl->isIndexedLoadLegal(Mode, Ty);
}
1023 
// Thin wrapper: forwards directly to the target-specific implementation.
bool TargetTransformInfo::isIndexedStoreLegal(MemIndexedMode Mode,
                                              Type *Ty) const {
  return TTIImpl->isIndexedStoreLegal(Mode, Ty);
}
1028 
// Thin wrapper: forwards directly to the target-specific implementation.
unsigned TargetTransformInfo::getLoadStoreVecRegBitWidth(unsigned AS) const {
  return TTIImpl->getLoadStoreVecRegBitWidth(AS);
}
1032 
// Thin wrapper: forwards directly to the target-specific implementation.
bool TargetTransformInfo::isLegalToVectorizeLoad(LoadInst *LI) const {
  return TTIImpl->isLegalToVectorizeLoad(LI);
}
1036 
// Thin wrapper: forwards directly to the target-specific implementation.
bool TargetTransformInfo::isLegalToVectorizeStore(StoreInst *SI) const {
  return TTIImpl->isLegalToVectorizeStore(SI);
}
1040 
// Thin wrapper: forwards directly to the target-specific implementation.
bool TargetTransformInfo::isLegalToVectorizeLoadChain(
    unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const {
  return TTIImpl->isLegalToVectorizeLoadChain(ChainSizeInBytes, Alignment,
                                              AddrSpace);
}
1046 
// Thin wrapper: forwards directly to the target-specific implementation.
bool TargetTransformInfo::isLegalToVectorizeStoreChain(
    unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const {
  return TTIImpl->isLegalToVectorizeStoreChain(ChainSizeInBytes, Alignment,
                                               AddrSpace);
}
1052 
// Thin wrapper: forwards directly to the target-specific implementation.
bool TargetTransformInfo::isLegalToVectorizeReduction(
    const RecurrenceDescriptor &RdxDesc, ElementCount VF) const {
  return TTIImpl->isLegalToVectorizeReduction(RdxDesc, VF);
}
1057 
// Thin wrapper: forwards directly to the target-specific implementation.
bool TargetTransformInfo::isElementTypeLegalForScalableVector(Type *Ty) const {
  return TTIImpl->isElementTypeLegalForScalableVector(Ty);
}
1061 
// Thin wrapper: forwards directly to the target-specific implementation.
unsigned TargetTransformInfo::getLoadVectorFactor(unsigned VF,
                                                  unsigned LoadSize,
                                                  unsigned ChainSizeInBytes,
                                                  VectorType *VecTy) const {
  return TTIImpl->getLoadVectorFactor(VF, LoadSize, ChainSizeInBytes, VecTy);
}
1068 
// Thin wrapper: forwards directly to the target-specific implementation.
unsigned TargetTransformInfo::getStoreVectorFactor(unsigned VF,
                                                   unsigned StoreSize,
                                                   unsigned ChainSizeInBytes,
                                                   VectorType *VecTy) const {
  return TTIImpl->getStoreVectorFactor(VF, StoreSize, ChainSizeInBytes, VecTy);
}
1075 
// Thin wrapper: forwards directly to the target-specific implementation.
bool TargetTransformInfo::preferInLoopReduction(unsigned Opcode, Type *Ty,
                                                ReductionFlags Flags) const {
  return TTIImpl->preferInLoopReduction(Opcode, Ty, Flags);
}
1080 
// Thin wrapper: forwards directly to the target-specific implementation.
bool TargetTransformInfo::preferPredicatedReductionSelect(
    unsigned Opcode, Type *Ty, ReductionFlags Flags) const {
  return TTIImpl->preferPredicatedReductionSelect(Opcode, Ty, Flags);
}
1085 
// Thin wrapper: forwards directly to the target-specific implementation.
TargetTransformInfo::VPLegalization
TargetTransformInfo::getVPLegalizationStrategy(const VPIntrinsic &VPI) const {
  return TTIImpl->getVPLegalizationStrategy(VPI);
}
1090 
// Thin wrapper: forwards directly to the target-specific implementation.
bool TargetTransformInfo::shouldExpandReduction(const IntrinsicInst *II) const {
  return TTIImpl->shouldExpandReduction(II);
}
1094 
// Thin wrapper: forwards directly to the target-specific implementation.
unsigned TargetTransformInfo::getGISelRematGlobalCost() const {
  return TTIImpl->getGISelRematGlobalCost();
}
1098 
// Thin wrapper: forwards directly to the target-specific implementation.
bool TargetTransformInfo::supportsScalableVectors() const {
  return TTIImpl->supportsScalableVectors();
}
1102 
// Thin wrapper: forwards directly to the target-specific implementation.
bool TargetTransformInfo::enableScalableVectorization() const {
  return TTIImpl->enableScalableVectorization();
}
1106 
// Thin wrapper: forwards directly to the target-specific implementation.
bool TargetTransformInfo::hasActiveVectorLength(unsigned Opcode, Type *DataType,
                                                Align Alignment) const {
  return TTIImpl->hasActiveVectorLength(Opcode, DataType, Alignment);
}
1111 
// Thin wrapper: forwards directly to the target-specific implementation.
InstructionCost
TargetTransformInfo::getInstructionLatency(const Instruction *I) const {
  return TTIImpl->getInstructionLatency(I);
}
1116 
// Compute the reciprocal-throughput cost of \p I via getUserCost, or return
// -1 for opcodes the cost model has no information about.
InstructionCost
TargetTransformInfo::getInstructionThroughput(const Instruction *I) const {
  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;

  // Every opcode listed below is handled uniformly by getUserCost.
  switch (I->getOpcode()) {
  case Instruction::GetElementPtr:
  case Instruction::Ret:
  case Instruction::PHI:
  case Instruction::Br:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::FNeg:
  case Instruction::Select:
  case Instruction::ICmp:
  case Instruction::FCmp:
  case Instruction::Store:
  case Instruction::Load:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
  case Instruction::ExtractElement:
  case Instruction::InsertElement:
  case Instruction::ExtractValue:
  case Instruction::ShuffleVector:
  case Instruction::Call:
  case Instruction::Switch:
    return getUserCost(I, CostKind);
  default:
    // We don't have any information on this instruction.
    return -1;
  }
}
1175 
// Out-of-line defaulted destructor for the Concept interface.
TargetTransformInfo::Concept::~Concept() = default;

// Default-constructed analysis falls back to the DataLayout-only TTI
// produced by getDefaultTTI.
TargetIRAnalysis::TargetIRAnalysis() : TTICallback(&getDefaultTTI) {}
1179 
// Construct with a caller-supplied callback that produces the per-function
// TTI result.
TargetIRAnalysis::TargetIRAnalysis(
    std::function<Result(const Function &)> TTICallback)
    : TTICallback(std::move(TTICallback)) {}
1183 
// Produce the TTI for \p F. The FunctionAnalysisManager argument is unused.
TargetIRAnalysis::Result TargetIRAnalysis::run(const Function &F,
                                               FunctionAnalysisManager &) {
  return TTICallback(F);
}
1188 
// Unique key identifying this analysis to the new pass manager.
AnalysisKey TargetIRAnalysis::Key;

// Fallback used when no target is available: builds the TTI from the
// module's DataLayout alone.
TargetIRAnalysis::Result TargetIRAnalysis::getDefaultTTI(const Function &F) {
  return Result(F.getParent()->getDataLayout());
}
1194 
1195 // Register the basic pass.
INITIALIZE_PASS(TargetTransformInfoWrapperPass, "tti",
                "Target Transform Information", false, true)
// Pass identification token used by the legacy pass manager.
char TargetTransformInfoWrapperPass::ID = 0;

// Out-of-line definition anchoring this pass to this translation unit.
void TargetTransformInfoWrapperPass::anchor() {}
1201 
// Default constructor: registers the pass; TIRA is default-constructed, so
// the DataLayout-only TTI callback is used.
TargetTransformInfoWrapperPass::TargetTransformInfoWrapperPass()
    : ImmutablePass(ID) {
  initializeTargetTransformInfoWrapperPassPass(
      *PassRegistry::getPassRegistry());
}
1207 
// Construct with a caller-supplied TargetIRAnalysis and register the pass.
TargetTransformInfoWrapperPass::TargetTransformInfoWrapperPass(
    TargetIRAnalysis TIRA)
    : ImmutablePass(ID), TIRA(std::move(TIRA)) {
  initializeTargetTransformInfoWrapperPassPass(
      *PassRegistry::getPassRegistry());
}
1214 
// Recompute and cache the TTI for \p F. A dummy FunctionAnalysisManager is
// passed because TargetIRAnalysis::run ignores that argument.
TargetTransformInfo &TargetTransformInfoWrapperPass::getTTI(const Function &F) {
  FunctionAnalysisManager DummyFAM;
  TTI = TIRA.run(F, DummyFAM);
  return *TTI;
}
1220 
// Factory for the legacy pass manager, taking ownership of \p TIRA.
ImmutablePass *
llvm::createTargetTransformInfoWrapperPass(TargetIRAnalysis TIRA) {
  return new TargetTransformInfoWrapperPass(std::move(TIRA));
}
1225