1 //===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
11 // basic-block-at-a-time approach. It should eventually be removed.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/ADT/APInt.h"
16 #include "llvm/ADT/ArrayRef.h"
17 #include "llvm/ADT/DenseMap.h"
18 #include "llvm/ADT/MapVector.h"
19 #include "llvm/ADT/PointerIntPair.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/ADT/Statistic.h"
24 #include "llvm/Analysis/BlockFrequencyInfo.h"
25 #include "llvm/Analysis/BranchProbabilityInfo.h"
26 #include "llvm/Analysis/ConstantFolding.h"
27 #include "llvm/Analysis/InstructionSimplify.h"
28 #include "llvm/Analysis/LoopInfo.h"
29 #include "llvm/Analysis/MemoryBuiltins.h"
30 #include "llvm/Analysis/ProfileSummaryInfo.h"
31 #include "llvm/Analysis/TargetLibraryInfo.h"
32 #include "llvm/Analysis/TargetTransformInfo.h"
33 #include "llvm/Analysis/ValueTracking.h"
34 #include "llvm/Analysis/VectorUtils.h"
35 #include "llvm/CodeGen/Analysis.h"
36 #include "llvm/CodeGen/ISDOpcodes.h"
37 #include "llvm/CodeGen/SelectionDAGNodes.h"
38 #include "llvm/CodeGen/TargetLowering.h"
39 #include "llvm/CodeGen/TargetPassConfig.h"
40 #include "llvm/CodeGen/TargetSubtargetInfo.h"
41 #include "llvm/CodeGen/ValueTypes.h"
42 #include "llvm/Config/llvm-config.h"
43 #include "llvm/IR/Argument.h"
44 #include "llvm/IR/Attributes.h"
45 #include "llvm/IR/BasicBlock.h"
46 #include "llvm/IR/Constant.h"
47 #include "llvm/IR/Constants.h"
48 #include "llvm/IR/DataLayout.h"
49 #include "llvm/IR/DerivedTypes.h"
50 #include "llvm/IR/Dominators.h"
51 #include "llvm/IR/Function.h"
52 #include "llvm/IR/GetElementPtrTypeIterator.h"
53 #include "llvm/IR/GlobalValue.h"
54 #include "llvm/IR/GlobalVariable.h"
55 #include "llvm/IR/IRBuilder.h"
56 #include "llvm/IR/InlineAsm.h"
57 #include "llvm/IR/InstrTypes.h"
58 #include "llvm/IR/Instruction.h"
59 #include "llvm/IR/Instructions.h"
60 #include "llvm/IR/IntrinsicInst.h"
61 #include "llvm/IR/Intrinsics.h"
62 #include "llvm/IR/IntrinsicsAArch64.h"
63 #include "llvm/IR/IntrinsicsX86.h"
64 #include "llvm/IR/LLVMContext.h"
65 #include "llvm/IR/MDBuilder.h"
66 #include "llvm/IR/Module.h"
67 #include "llvm/IR/Operator.h"
68 #include "llvm/IR/PatternMatch.h"
69 #include "llvm/IR/Statepoint.h"
70 #include "llvm/IR/Type.h"
71 #include "llvm/IR/Use.h"
72 #include "llvm/IR/User.h"
73 #include "llvm/IR/Value.h"
74 #include "llvm/IR/ValueHandle.h"
75 #include "llvm/IR/ValueMap.h"
76 #include "llvm/InitializePasses.h"
77 #include "llvm/Pass.h"
78 #include "llvm/Support/BlockFrequency.h"
79 #include "llvm/Support/BranchProbability.h"
80 #include "llvm/Support/Casting.h"
81 #include "llvm/Support/CommandLine.h"
82 #include "llvm/Support/Compiler.h"
83 #include "llvm/Support/Debug.h"
84 #include "llvm/Support/ErrorHandling.h"
85 #include "llvm/Support/MachineValueType.h"
86 #include "llvm/Support/MathExtras.h"
87 #include "llvm/Support/raw_ostream.h"
88 #include "llvm/Target/TargetMachine.h"
89 #include "llvm/Target/TargetOptions.h"
90 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
91 #include "llvm/Transforms/Utils/BypassSlowDivision.h"
92 #include "llvm/Transforms/Utils/Local.h"
93 #include "llvm/Transforms/Utils/SimplifyLibCalls.h"
94 #include "llvm/Transforms/Utils/SizeOpts.h"
95 #include <algorithm>
96 #include <cassert>
97 #include <cstdint>
98 #include <iterator>
99 #include <limits>
100 #include <memory>
101 #include <utility>
102 #include <vector>
103 
104 using namespace llvm;
105 using namespace llvm::PatternMatch;
106 
107 #define DEBUG_TYPE "codegenprepare"
108 
109 STATISTIC(NumBlocksElim, "Number of blocks eliminated");
110 STATISTIC(NumPHIsElim,   "Number of trivial PHIs eliminated");
111 STATISTIC(NumGEPsElim,   "Number of GEPs converted to casts");
112 STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
113                       "sunken Cmps");
114 STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
115                        "of sunken Casts");
116 STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
117                           "computations were sunk");
118 STATISTIC(NumMemoryInstsPhiCreated,
119           "Number of phis created when address "
120           "computations were sunk to memory instructions");
121 STATISTIC(NumMemoryInstsSelectCreated,
          "Number of selects created when address "
123           "computations were sunk to memory instructions");
124 STATISTIC(NumExtsMoved,  "Number of [s|z]ext instructions combined with loads");
125 STATISTIC(NumExtUses,    "Number of uses of [s|z]ext instructions optimized");
126 STATISTIC(NumAndsAdded,
127           "Number of and mask instructions added to form ext loads");
128 STATISTIC(NumAndUses, "Number of uses of and mask instructions optimized");
129 STATISTIC(NumRetsDup,    "Number of return instructions duplicated");
130 STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
131 STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");
132 STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed");
133 
134 static cl::opt<bool> DisableBranchOpts(
135   "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
136   cl::desc("Disable branch optimizations in CodeGenPrepare"));
137 
138 static cl::opt<bool>
139     DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false),
140                   cl::desc("Disable GC optimizations in CodeGenPrepare"));
141 
142 static cl::opt<bool> DisableSelectToBranch(
143   "disable-cgp-select2branch", cl::Hidden, cl::init(false),
144   cl::desc("Disable select to branch conversion."));
145 
146 static cl::opt<bool> AddrSinkUsingGEPs(
147   "addr-sink-using-gep", cl::Hidden, cl::init(true),
148   cl::desc("Address sinking in CGP using GEPs."));
149 
150 static cl::opt<bool> EnableAndCmpSinking(
151    "enable-andcmp-sinking", cl::Hidden, cl::init(true),
   cl::desc("Enable sinking and/cmp into branches."));
153 
154 static cl::opt<bool> DisableStoreExtract(
155     "disable-cgp-store-extract", cl::Hidden, cl::init(false),
156     cl::desc("Disable store(extract) optimizations in CodeGenPrepare"));
157 
158 static cl::opt<bool> StressStoreExtract(
159     "stress-cgp-store-extract", cl::Hidden, cl::init(false),
160     cl::desc("Stress test store(extract) optimizations in CodeGenPrepare"));
161 
162 static cl::opt<bool> DisableExtLdPromotion(
163     "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
164     cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in "
165              "CodeGenPrepare"));
166 
167 static cl::opt<bool> StressExtLdPromotion(
168     "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
169     cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) "
170              "optimization in CodeGenPrepare"));
171 
172 static cl::opt<bool> DisablePreheaderProtect(
173     "disable-preheader-prot", cl::Hidden, cl::init(false),
174     cl::desc("Disable protection against removing loop preheaders"));
175 
176 static cl::opt<bool> ProfileGuidedSectionPrefix(
177     "profile-guided-section-prefix", cl::Hidden, cl::init(true), cl::ZeroOrMore,
178     cl::desc("Use profile info to add section prefix for hot/cold functions"));
179 
180 static cl::opt<bool> ProfileUnknownInSpecialSection(
181     "profile-unknown-in-special-section", cl::Hidden, cl::init(false),
182     cl::ZeroOrMore,
    cl::desc("In profiling modes such as SampleFDO, a function without a "
             "profile is not necessarily cold: it may be a newly added "
             "function that has never been sampled. With this flag enabled, "
             "the compiler puts such profile-unknown functions into a special "
             "section, so the runtime system can handle them differently from "
             "the .text section, for example to save RAM."));
190 
191 static cl::opt<unsigned> FreqRatioToSkipMerge(
192     "cgp-freq-ratio-to-skip-merge", cl::Hidden, cl::init(2),
193     cl::desc("Skip merging empty blocks if (frequency of empty block) / "
194              "(frequency of destination block) is greater than this ratio"));
195 
196 static cl::opt<bool> ForceSplitStore(
197     "force-split-store", cl::Hidden, cl::init(false),
198     cl::desc("Force store splitting no matter what the target query says."));
199 
200 static cl::opt<bool>
201 EnableTypePromotionMerge("cgp-type-promotion-merge", cl::Hidden,
    cl::desc("Enable merging of redundant sexts when one dominates"
203     " the other."), cl::init(true));
204 
205 static cl::opt<bool> DisableComplexAddrModes(
206     "disable-complex-addr-modes", cl::Hidden, cl::init(false),
207     cl::desc("Disables combining addressing modes with different parts "
208              "in optimizeMemoryInst."));
209 
210 static cl::opt<bool>
211 AddrSinkNewPhis("addr-sink-new-phis", cl::Hidden, cl::init(false),
212                 cl::desc("Allow creation of Phis in Address sinking."));
213 
214 static cl::opt<bool>
215 AddrSinkNewSelects("addr-sink-new-select", cl::Hidden, cl::init(true),
216                    cl::desc("Allow creation of selects in Address sinking."));
217 
218 static cl::opt<bool> AddrSinkCombineBaseReg(
219     "addr-sink-combine-base-reg", cl::Hidden, cl::init(true),
220     cl::desc("Allow combining of BaseReg field in Address sinking."));
221 
222 static cl::opt<bool> AddrSinkCombineBaseGV(
223     "addr-sink-combine-base-gv", cl::Hidden, cl::init(true),
224     cl::desc("Allow combining of BaseGV field in Address sinking."));
225 
226 static cl::opt<bool> AddrSinkCombineBaseOffs(
227     "addr-sink-combine-base-offs", cl::Hidden, cl::init(true),
228     cl::desc("Allow combining of BaseOffs field in Address sinking."));
229 
230 static cl::opt<bool> AddrSinkCombineScaledReg(
231     "addr-sink-combine-scaled-reg", cl::Hidden, cl::init(true),
232     cl::desc("Allow combining of ScaledReg field in Address sinking."));
233 
234 static cl::opt<bool>
235     EnableGEPOffsetSplit("cgp-split-large-offset-gep", cl::Hidden,
236                          cl::init(true),
237                          cl::desc("Enable splitting large offset of GEP."));
238 
239 static cl::opt<bool> EnableICMP_EQToICMP_ST(
240     "cgp-icmp-eq2icmp-st", cl::Hidden, cl::init(false),
241     cl::desc("Enable ICMP_EQ to ICMP_S(L|G)T conversion."));
242 
243 static cl::opt<bool>
244     VerifyBFIUpdates("cgp-verify-bfi-updates", cl::Hidden, cl::init(false),
245                      cl::desc("Enable BFI update verification for "
246                               "CodeGenPrepare."));
247 
248 namespace {
249 
250 enum ExtType {
251   ZeroExtension,   // Zero extension has been seen.
252   SignExtension,   // Sign extension has been seen.
253   BothExtension    // This extension type is used if we saw sext after
254                    // ZeroExtension had been set, or if we saw zext after
255                    // SignExtension had been set. It makes the type
256                    // information of a promoted instruction invalid.
257 };
258 
259 using SetOfInstrs = SmallPtrSet<Instruction *, 16>;
260 using TypeIsSExt = PointerIntPair<Type *, 2, ExtType>;
261 using InstrToOrigTy = DenseMap<Instruction *, TypeIsSExt>;
262 using SExts = SmallVector<Instruction *, 16>;
263 using ValueToSExts = DenseMap<Value *, SExts>;
264 
265 class TypePromotionTransaction;
266 
267   class CodeGenPrepare : public FunctionPass {
268     const TargetMachine *TM = nullptr;
269     const TargetSubtargetInfo *SubtargetInfo;
270     const TargetLowering *TLI = nullptr;
271     const TargetRegisterInfo *TRI;
272     const TargetTransformInfo *TTI = nullptr;
273     const TargetLibraryInfo *TLInfo;
274     const LoopInfo *LI;
275     std::unique_ptr<BlockFrequencyInfo> BFI;
276     std::unique_ptr<BranchProbabilityInfo> BPI;
277     ProfileSummaryInfo *PSI;
278 
279     /// As we scan instructions optimizing them, this is the next instruction
280     /// to optimize. Transforms that can invalidate this should update it.
281     BasicBlock::iterator CurInstIterator;
282 
283     /// Keeps track of non-local addresses that have been sunk into a block.
284     /// This allows us to avoid inserting duplicate code for blocks with
285     /// multiple load/stores of the same address. The usage of WeakTrackingVH
286     /// enables SunkAddrs to be treated as a cache whose entries can be
287     /// invalidated if a sunken address computation has been erased.
288     ValueMap<Value*, WeakTrackingVH> SunkAddrs;
289 
290     /// Keeps track of all instructions inserted for the current function.
291     SetOfInstrs InsertedInsts;
292 
    /// Keeps track of the original type of each promoted instruction for the
    /// current function.
295     InstrToOrigTy PromotedInsts;
296 
297     /// Keep track of instructions removed during promotion.
298     SetOfInstrs RemovedInsts;
299 
300     /// Keep track of sext chains based on their initial value.
301     DenseMap<Value *, Instruction *> SeenChainsForSExt;
302 
303     /// Keep track of GEPs accessing the same data structures such as structs or
304     /// arrays that are candidates to be split later because of their large
305     /// size.
306     MapVector<
307         AssertingVH<Value>,
308         SmallVector<std::pair<AssertingVH<GetElementPtrInst>, int64_t>, 32>>
309         LargeOffsetGEPMap;
310 
311     /// Keep track of new GEP base after splitting the GEPs having large offset.
312     SmallSet<AssertingVH<Value>, 2> NewGEPBases;
313 
    /// Map each large offset GEP to its serial number.
315     DenseMap<AssertingVH<GetElementPtrInst>, int> LargeOffsetGEPID;
316 
    /// Keep track of promoted SExt instructions, keyed by their original
    /// value.
318     ValueToSExts ValToSExtendedUses;
319 
320     /// True if the function has the OptSize attribute.
321     bool OptSize;
322 
323     /// DataLayout for the Function being processed.
324     const DataLayout *DL = nullptr;
325 
326     /// Building the dominator tree can be expensive, so we only build it
327     /// lazily and update it when required.
328     std::unique_ptr<DominatorTree> DT;
329 
330   public:
331     static char ID; // Pass identification, replacement for typeid
332 
333     CodeGenPrepare() : FunctionPass(ID) {
334       initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
335     }
336 
337     bool runOnFunction(Function &F) override;
338 
339     StringRef getPassName() const override { return "CodeGen Prepare"; }
340 
341     void getAnalysisUsage(AnalysisUsage &AU) const override {
342       // FIXME: When we can selectively preserve passes, preserve the domtree.
343       AU.addRequired<ProfileSummaryInfoWrapperPass>();
344       AU.addRequired<TargetLibraryInfoWrapperPass>();
345       AU.addRequired<TargetPassConfig>();
346       AU.addRequired<TargetTransformInfoWrapperPass>();
347       AU.addRequired<LoopInfoWrapperPass>();
348     }
349 
350   private:
351     template <typename F>
352     void resetIteratorIfInvalidatedWhileCalling(BasicBlock *BB, F f) {
353       // Substituting can cause recursive simplifications, which can invalidate
354       // our iterator.  Use a WeakTrackingVH to hold onto it in case this
355       // happens.
356       Value *CurValue = &*CurInstIterator;
357       WeakTrackingVH IterHandle(CurValue);
358 
359       f();
360 
361       // If the iterator instruction was recursively deleted, start over at the
362       // start of the block.
363       if (IterHandle != CurValue) {
364         CurInstIterator = BB->begin();
365         SunkAddrs.clear();
366       }
367     }
368 
369     // Get the DominatorTree, building if necessary.
370     DominatorTree &getDT(Function &F) {
371       if (!DT)
372         DT = std::make_unique<DominatorTree>(F);
373       return *DT;
374     }
375 
376     bool eliminateFallThrough(Function &F);
377     bool eliminateMostlyEmptyBlocks(Function &F);
378     BasicBlock *findDestBlockOfMergeableEmptyBlock(BasicBlock *BB);
379     bool canMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
380     void eliminateMostlyEmptyBlock(BasicBlock *BB);
381     bool isMergingEmptyBlockProfitable(BasicBlock *BB, BasicBlock *DestBB,
382                                        bool isPreheader);
383     bool optimizeBlock(BasicBlock &BB, bool &ModifiedDT);
384     bool optimizeInst(Instruction *I, bool &ModifiedDT);
385     bool optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
386                             Type *AccessTy, unsigned AddrSpace);
387     bool optimizeGatherScatterInst(Instruction *MemoryInst, Value *Ptr);
388     bool optimizeInlineAsmInst(CallInst *CS);
389     bool optimizeCallInst(CallInst *CI, bool &ModifiedDT);
390     bool optimizeExt(Instruction *&I);
391     bool optimizeExtUses(Instruction *I);
392     bool optimizeLoadExt(LoadInst *Load);
393     bool optimizeShiftInst(BinaryOperator *BO);
394     bool optimizeFunnelShift(IntrinsicInst *Fsh);
395     bool optimizeSelectInst(SelectInst *SI);
396     bool optimizeShuffleVectorInst(ShuffleVectorInst *SVI);
397     bool optimizeSwitchInst(SwitchInst *SI);
398     bool optimizeExtractElementInst(Instruction *Inst);
399     bool dupRetToEnableTailCallOpts(BasicBlock *BB, bool &ModifiedDT);
400     bool fixupDbgValue(Instruction *I);
401     bool placeDbgValues(Function &F);
402     bool canFormExtLd(const SmallVectorImpl<Instruction *> &MovedExts,
403                       LoadInst *&LI, Instruction *&Inst, bool HasPromoted);
404     bool tryToPromoteExts(TypePromotionTransaction &TPT,
405                           const SmallVectorImpl<Instruction *> &Exts,
406                           SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
407                           unsigned CreatedInstsCost = 0);
408     bool mergeSExts(Function &F);
409     bool splitLargeGEPOffsets();
410     bool performAddressTypePromotion(
411         Instruction *&Inst,
412         bool AllowPromotionWithoutCommonHeader,
413         bool HasPromoted, TypePromotionTransaction &TPT,
414         SmallVectorImpl<Instruction *> &SpeculativelyMovedExts);
415     bool splitBranchCondition(Function &F, bool &ModifiedDT);
416     bool simplifyOffsetableRelocate(GCStatepointInst &I);
417 
418     bool tryToSinkFreeOperands(Instruction *I);
419     bool replaceMathCmpWithIntrinsic(BinaryOperator *BO, Value *Arg0,
420                                      Value *Arg1, CmpInst *Cmp,
421                                      Intrinsic::ID IID);
422     bool optimizeCmp(CmpInst *Cmp, bool &ModifiedDT);
423     bool combineToUSubWithOverflow(CmpInst *Cmp, bool &ModifiedDT);
424     bool combineToUAddWithOverflow(CmpInst *Cmp, bool &ModifiedDT);
425     void verifyBFIUpdates(Function &F);
426   };
427 
428 } // end anonymous namespace
429 
430 char CodeGenPrepare::ID = 0;
431 
432 INITIALIZE_PASS_BEGIN(CodeGenPrepare, DEBUG_TYPE,
433                       "Optimize for code generation", false, false)
434 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
435 INITIALIZE_PASS_END(CodeGenPrepare, DEBUG_TYPE,
436                     "Optimize for code generation", false, false)
437 
438 FunctionPass *llvm::createCodeGenPreparePass() { return new CodeGenPrepare(); }
439 
440 bool CodeGenPrepare::runOnFunction(Function &F) {
441   if (skipFunction(F))
442     return false;
443 
444   DL = &F.getParent()->getDataLayout();
445 
446   bool EverMadeChange = false;
447   // Clear per function information.
448   InsertedInsts.clear();
449   PromotedInsts.clear();
450 
451   TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
452   SubtargetInfo = TM->getSubtargetImpl(F);
453   TLI = SubtargetInfo->getTargetLowering();
454   TRI = SubtargetInfo->getRegisterInfo();
455   TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
456   TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
457   LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
458   BPI.reset(new BranchProbabilityInfo(F, *LI));
459   BFI.reset(new BlockFrequencyInfo(F, *BPI, *LI));
460   PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
461   OptSize = F.hasOptSize();
462   if (ProfileGuidedSectionPrefix) {
463     if (PSI->isFunctionHotInCallGraph(&F, *BFI))
464       F.setSectionPrefix(".hot");
465     else if (PSI->isFunctionColdInCallGraph(&F, *BFI))
466       F.setSectionPrefix(".unlikely");
467     else if (ProfileUnknownInSpecialSection && PSI->hasPartialSampleProfile() &&
468              PSI->isFunctionHotnessUnknown(F))
469       F.setSectionPrefix(".unknown");
470   }
471 
472   /// This optimization identifies DIV instructions that can be
473   /// profitably bypassed and carried out with a shorter, faster divide.
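  /// For example (an illustrative sketch, not tied to a specific target): if a
  /// target reports via getBypassSlowDivWidths() that a 64-bit divide can be
  /// bypassed by a 32-bit one, bypassSlowDivision inserts a runtime check and,
  /// when both operands fit in 32 bits, performs the cheaper narrow divide.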
474   if (!OptSize && !PSI->hasHugeWorkingSetSize() && TLI->isSlowDivBypassed()) {
475     const DenseMap<unsigned int, unsigned int> &BypassWidths =
476         TLI->getBypassSlowDivWidths();
477     BasicBlock* BB = &*F.begin();
478     while (BB != nullptr) {
479       // bypassSlowDivision may create new BBs, but we don't want to reapply the
480       // optimization to those blocks.
481       BasicBlock* Next = BB->getNextNode();
482       // F.hasOptSize is already checked in the outer if statement.
483       if (!llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
484         EverMadeChange |= bypassSlowDivision(BB, BypassWidths);
485       BB = Next;
486     }
487   }
488 
489   // Eliminate blocks that contain only PHI nodes and an
490   // unconditional branch.
491   EverMadeChange |= eliminateMostlyEmptyBlocks(F);
492 
493   bool ModifiedDT = false;
494   if (!DisableBranchOpts)
495     EverMadeChange |= splitBranchCondition(F, ModifiedDT);
496 
497   // Split some critical edges where one of the sources is an indirect branch,
498   // to help generate sane code for PHIs involving such edges.
499   EverMadeChange |= SplitIndirectBrCriticalEdges(F);
500 
501   bool MadeChange = true;
502   while (MadeChange) {
503     MadeChange = false;
504     DT.reset();
505     for (Function::iterator I = F.begin(); I != F.end(); ) {
506       BasicBlock *BB = &*I++;
507       bool ModifiedDTOnIteration = false;
508       MadeChange |= optimizeBlock(*BB, ModifiedDTOnIteration);
509 
510       // Restart BB iteration if the dominator tree of the Function was changed
511       if (ModifiedDTOnIteration)
512         break;
513     }
514     if (EnableTypePromotionMerge && !ValToSExtendedUses.empty())
515       MadeChange |= mergeSExts(F);
516     if (!LargeOffsetGEPMap.empty())
517       MadeChange |= splitLargeGEPOffsets();
518 
519     if (MadeChange)
520       eliminateFallThrough(F);
521 
522     // Really free removed instructions during promotion.
523     for (Instruction *I : RemovedInsts)
524       I->deleteValue();
525 
526     EverMadeChange |= MadeChange;
527     SeenChainsForSExt.clear();
528     ValToSExtendedUses.clear();
529     RemovedInsts.clear();
530     LargeOffsetGEPMap.clear();
531     LargeOffsetGEPID.clear();
532   }
533 
534   SunkAddrs.clear();
535 
536   if (!DisableBranchOpts) {
537     MadeChange = false;
538     // Use a set vector to get deterministic iteration order. The order the
539     // blocks are removed may affect whether or not PHI nodes in successors
540     // are removed.
541     SmallSetVector<BasicBlock*, 8> WorkList;
542     for (BasicBlock &BB : F) {
543       SmallVector<BasicBlock *, 2> Successors(succ_begin(&BB), succ_end(&BB));
544       MadeChange |= ConstantFoldTerminator(&BB, true);
545       if (!MadeChange) continue;
546 
547       for (SmallVectorImpl<BasicBlock*>::iterator
548              II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
549         if (pred_begin(*II) == pred_end(*II))
550           WorkList.insert(*II);
551     }
552 
553     // Delete the dead blocks and any of their dead successors.
554     MadeChange |= !WorkList.empty();
555     while (!WorkList.empty()) {
556       BasicBlock *BB = WorkList.pop_back_val();
557       SmallVector<BasicBlock*, 2> Successors(succ_begin(BB), succ_end(BB));
558 
559       DeleteDeadBlock(BB);
560 
561       for (SmallVectorImpl<BasicBlock*>::iterator
562              II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
563         if (pred_begin(*II) == pred_end(*II))
564           WorkList.insert(*II);
565     }
566 
567     // Merge pairs of basic blocks with unconditional branches, connected by
568     // a single edge.
569     if (EverMadeChange || MadeChange)
570       MadeChange |= eliminateFallThrough(F);
571 
572     EverMadeChange |= MadeChange;
573   }
574 
575   if (!DisableGCOpts) {
576     SmallVector<GCStatepointInst *, 2> Statepoints;
577     for (BasicBlock &BB : F)
578       for (Instruction &I : BB)
579         if (auto *SP = dyn_cast<GCStatepointInst>(&I))
580           Statepoints.push_back(SP);
581     for (auto &I : Statepoints)
582       EverMadeChange |= simplifyOffsetableRelocate(*I);
583   }
584 
585   // Do this last to clean up use-before-def scenarios introduced by other
586   // preparatory transforms.
587   EverMadeChange |= placeDbgValues(F);
588 
589 #ifndef NDEBUG
590   if (VerifyBFIUpdates)
591     verifyBFIUpdates(F);
592 #endif
593 
594   return EverMadeChange;
595 }
596 
597 // Verify BFI has been updated correctly by recomputing BFI and comparing them.
598 void LLVM_ATTRIBUTE_UNUSED CodeGenPrepare::verifyBFIUpdates(Function &F) {
599   DominatorTree NewDT(F);
600   LoopInfo NewLI(NewDT);
601   BranchProbabilityInfo NewBPI(F, NewLI, TLInfo);
602   BlockFrequencyInfo NewBFI(F, NewBPI, NewLI);
603   NewBFI.verifyMatch(*BFI);
604 }
605 
606 /// Merge basic blocks which are connected by a single edge, where one of the
607 /// basic blocks has a single successor pointing to the other basic block,
608 /// which has a single predecessor.
609 bool CodeGenPrepare::eliminateFallThrough(Function &F) {
610   bool Changed = false;
611   // Scan all of the blocks in the function, except for the entry block.
612   // Use a temporary array to avoid iterator being invalidated when
613   // deleting blocks.
614   SmallVector<WeakTrackingVH, 16> Blocks;
615   for (auto &Block : llvm::make_range(std::next(F.begin()), F.end()))
616     Blocks.push_back(&Block);
617 
618   for (auto &Block : Blocks) {
619     auto *BB = cast_or_null<BasicBlock>(Block);
620     if (!BB)
621       continue;
622     // If the destination block has a single pred, then this is a trivial
623     // edge, just collapse it.
624     BasicBlock *SinglePred = BB->getSinglePredecessor();
625 
626     // Don't merge if BB's address is taken.
627     if (!SinglePred || SinglePred == BB || BB->hasAddressTaken()) continue;
628 
629     BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
630     if (Term && !Term->isConditional()) {
631       Changed = true;
632       LLVM_DEBUG(dbgs() << "To merge:\n" << *BB << "\n\n\n");
633 
634       // Merge BB into SinglePred and delete it.
635       MergeBlockIntoPredecessor(BB);
636     }
637   }
638   return Changed;
639 }
640 
/// Find the destination block for BB if BB is a mergeable empty block.
642 BasicBlock *CodeGenPrepare::findDestBlockOfMergeableEmptyBlock(BasicBlock *BB) {
643   // If this block doesn't end with an uncond branch, ignore it.
644   BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
645   if (!BI || !BI->isUnconditional())
646     return nullptr;
647 
648   // If the instruction before the branch (skipping debug info) isn't a phi
649   // node, then other stuff is happening here.
650   BasicBlock::iterator BBI = BI->getIterator();
651   if (BBI != BB->begin()) {
652     --BBI;
653     while (isa<DbgInfoIntrinsic>(BBI)) {
654       if (BBI == BB->begin())
655         break;
656       --BBI;
657     }
658     if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
659       return nullptr;
660   }
661 
662   // Do not break infinite loops.
663   BasicBlock *DestBB = BI->getSuccessor(0);
664   if (DestBB == BB)
665     return nullptr;
666 
667   if (!canMergeBlocks(BB, DestBB))
668     DestBB = nullptr;
669 
670   return DestBB;
671 }
672 
673 /// Eliminate blocks that contain only PHI nodes, debug info directives, and an
674 /// unconditional branch. Passes before isel (e.g. LSR/loopsimplify) often split
675 /// edges in ways that are non-optimal for isel. Start by eliminating these
676 /// blocks so we can split them the way we want them.
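///
/// For illustration (a sketch, not from a specific test), a mostly empty block
/// looks like:
///   bb:
///     %p = phi i32 [ %a, %pred1 ], [ %b, %pred2 ]
///     br label %dest
/// and is folded into %dest when canMergeBlocks() and the profitability check
/// in isMergingEmptyBlockProfitable() allow it.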
677 bool CodeGenPrepare::eliminateMostlyEmptyBlocks(Function &F) {
678   SmallPtrSet<BasicBlock *, 16> Preheaders;
679   SmallVector<Loop *, 16> LoopList(LI->begin(), LI->end());
680   while (!LoopList.empty()) {
681     Loop *L = LoopList.pop_back_val();
682     LoopList.insert(LoopList.end(), L->begin(), L->end());
683     if (BasicBlock *Preheader = L->getLoopPreheader())
684       Preheaders.insert(Preheader);
685   }
686 
687   bool MadeChange = false;
688   // Copy blocks into a temporary array to avoid iterator invalidation issues
689   // as we remove them.
690   // Note that this intentionally skips the entry block.
691   SmallVector<WeakTrackingVH, 16> Blocks;
692   for (auto &Block : llvm::make_range(std::next(F.begin()), F.end()))
693     Blocks.push_back(&Block);
694 
695   for (auto &Block : Blocks) {
696     BasicBlock *BB = cast_or_null<BasicBlock>(Block);
697     if (!BB)
698       continue;
699     BasicBlock *DestBB = findDestBlockOfMergeableEmptyBlock(BB);
700     if (!DestBB ||
701         !isMergingEmptyBlockProfitable(BB, DestBB, Preheaders.count(BB)))
702       continue;
703 
704     eliminateMostlyEmptyBlock(BB);
705     MadeChange = true;
706   }
707   return MadeChange;
708 }
709 
710 bool CodeGenPrepare::isMergingEmptyBlockProfitable(BasicBlock *BB,
711                                                    BasicBlock *DestBB,
712                                                    bool isPreheader) {
713   // Do not delete loop preheaders if doing so would create a critical edge.
714   // Loop preheaders can be good locations to spill registers. If the
715   // preheader is deleted and we create a critical edge, registers may be
716   // spilled in the loop body instead.
717   if (!DisablePreheaderProtect && isPreheader &&
718       !(BB->getSinglePredecessor() &&
719         BB->getSinglePredecessor()->getSingleSuccessor()))
720     return false;
721 
722   // Skip merging if the block's successor is also a successor to any callbr
723   // that leads to this block.
724   // FIXME: Is this really needed? Is this a correctness issue?
725   for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
726     if (auto *CBI = dyn_cast<CallBrInst>((*PI)->getTerminator()))
727       for (unsigned i = 0, e = CBI->getNumSuccessors(); i != e; ++i)
728         if (DestBB == CBI->getSuccessor(i))
729           return false;
730   }
731 
732   // Try to skip merging if the unique predecessor of BB is terminated by a
733   // switch or indirect branch instruction, and BB is used as an incoming block
  // of PHIs in DestBB. In that case, merging BB and DestBB would cause ISel to
  // add COPY instructions in the predecessor of BB instead of BB (if it is not
  // merged). Note that the critical edge created by merging such blocks won't
  // be split in MachineSink because the jump table is not analyzable. By
  // keeping such an empty block (BB), ISel will place COPY instructions in BB,
  // not in the predecessor of BB.
740   BasicBlock *Pred = BB->getUniquePredecessor();
741   if (!Pred ||
742       !(isa<SwitchInst>(Pred->getTerminator()) ||
743         isa<IndirectBrInst>(Pred->getTerminator())))
744     return true;
745 
746   if (BB->getTerminator() != BB->getFirstNonPHIOrDbg())
747     return true;
748 
  // We use a simple cost heuristic: skipping the merge is profitable if the
  // cost of skipping merging is less than the cost of merging, i.e.
  // Cost(skipping merging) < Cost(merging BB), where
  // Cost(skipping merging) is Freq(BB) * (Cost(Copy) + Cost(Branch)), and
  // Cost(merging BB) is Freq(Pred) * Cost(Copy).
  // Assuming Cost(Copy) == Cost(Branch), this simplifies to:
  //   Freq(Pred) / Freq(BB) > 2.
  // Note that if there are multiple empty blocks sharing the same incoming
  // value for the PHIs in DestBB, we consider them together. In that case,
  // Cost(merging BB) will be the sum of their frequencies.
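  //
  // For example (illustrative numbers only): with the default
  // FreqRatioToSkipMerge of 2, if Freq(Pred) == 300 and Freq(BB) == 100, then
  // 300 > 2 * 100, so skipping the merge is deemed profitable and this
  // function returns false.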
759 
760   if (!isa<PHINode>(DestBB->begin()))
761     return true;
762 
763   SmallPtrSet<BasicBlock *, 16> SameIncomingValueBBs;
764 
765   // Find all other incoming blocks from which incoming values of all PHIs in
766   // DestBB are the same as the ones from BB.
767   for (pred_iterator PI = pred_begin(DestBB), E = pred_end(DestBB); PI != E;
768        ++PI) {
769     BasicBlock *DestBBPred = *PI;
770     if (DestBBPred == BB)
771       continue;
772 
773     if (llvm::all_of(DestBB->phis(), [&](const PHINode &DestPN) {
774           return DestPN.getIncomingValueForBlock(BB) ==
775                  DestPN.getIncomingValueForBlock(DestBBPred);
776         }))
777       SameIncomingValueBBs.insert(DestBBPred);
778   }
779 
  // See if all of BB's incoming values are the same as the value from Pred. In
  // this case, there is no reason to skip merging because COPYs are expected
  // to be placed in Pred already.
783   if (SameIncomingValueBBs.count(Pred))
784     return true;
785 
786   BlockFrequency PredFreq = BFI->getBlockFreq(Pred);
787   BlockFrequency BBFreq = BFI->getBlockFreq(BB);
788 
789   for (auto *SameValueBB : SameIncomingValueBBs)
790     if (SameValueBB->getUniquePredecessor() == Pred &&
791         DestBB == findDestBlockOfMergeableEmptyBlock(SameValueBB))
792       BBFreq += BFI->getBlockFreq(SameValueBB);
793 
794   return PredFreq.getFrequency() <=
795          BBFreq.getFrequency() * FreqRatioToSkipMerge;
796 }
797 
798 /// Return true if we can merge BB into DestBB if there is a single
799 /// unconditional branch between them, and BB contains no other non-phi
800 /// instructions.
801 bool CodeGenPrepare::canMergeBlocks(const BasicBlock *BB,
802                                     const BasicBlock *DestBB) const {
803   // We only want to eliminate blocks whose phi nodes are used by phi nodes in
  // the successor.  If there are more complex conditions (e.g. preheaders),
805   // don't mess around with them.
806   for (const PHINode &PN : BB->phis()) {
807     for (const User *U : PN.users()) {
808       const Instruction *UI = cast<Instruction>(U);
809       if (UI->getParent() != DestBB || !isa<PHINode>(UI))
810         return false;
811       // If User is inside DestBB block and it is a PHINode then check
812       // incoming value. If incoming value is not from BB then this is
813       // a complex condition (e.g. preheaders) we want to avoid here.
814       if (UI->getParent() == DestBB) {
815         if (const PHINode *UPN = dyn_cast<PHINode>(UI))
816           for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
817             Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
818             if (Insn && Insn->getParent() == BB &&
819                 Insn->getParent() != UPN->getIncomingBlock(I))
820               return false;
821           }
822       }
823     }
824   }
825 
826   // If BB and DestBB contain any common predecessors, then the phi nodes in BB
827   // and DestBB may have conflicting incoming values for the block.  If so, we
828   // can't merge the block.
829   const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
830   if (!DestBBPN) return true;  // no conflict.
831 
832   // Collect the preds of BB.
833   SmallPtrSet<const BasicBlock*, 16> BBPreds;
834   if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
835     // It is faster to get preds from a PHI than with pred_iterator.
836     for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
837       BBPreds.insert(BBPN->getIncomingBlock(i));
838   } else {
839     BBPreds.insert(pred_begin(BB), pred_end(BB));
840   }
841 
842   // Walk the preds of DestBB.
843   for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
844     BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
845     if (BBPreds.count(Pred)) {   // Common predecessor?
846       for (const PHINode &PN : DestBB->phis()) {
847         const Value *V1 = PN.getIncomingValueForBlock(Pred);
848         const Value *V2 = PN.getIncomingValueForBlock(BB);
849 
850         // If V2 is a phi node in BB, look up what the mapped value will be.
851         if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
852           if (V2PN->getParent() == BB)
853             V2 = V2PN->getIncomingValueForBlock(Pred);
854 
855         // If there is a conflict, bail out.
856         if (V1 != V2) return false;
857       }
858     }
859   }
860 
861   return true;
862 }
863 
/// Eliminate a basic block that has only phis and an unconditional branch in
865 /// it.
866 void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) {
867   BranchInst *BI = cast<BranchInst>(BB->getTerminator());
868   BasicBlock *DestBB = BI->getSuccessor(0);
869 
870   LLVM_DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n"
871                     << *BB << *DestBB);
872 
873   // If the destination block has a single pred, then this is a trivial edge,
874   // just collapse it.
875   if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
876     if (SinglePred != DestBB) {
877       assert(SinglePred == BB &&
878              "Single predecessor not the same as predecessor");
879       // Merge DestBB into SinglePred/BB and delete it.
880       MergeBlockIntoPredecessor(DestBB);
881       // Note: BB(=SinglePred) will not be deleted on this path.
882       // DestBB(=its single successor) is the one that was deleted.
883       LLVM_DEBUG(dbgs() << "AFTER:\n" << *SinglePred << "\n\n\n");
884       return;
885     }
886   }
887 
888   // Otherwise, we have multiple predecessors of BB.  Update the PHIs in DestBB
889   // to handle the new incoming edges it is about to have.
890   for (PHINode &PN : DestBB->phis()) {
891     // Remove the incoming value for BB, and remember it.
892     Value *InVal = PN.removeIncomingValue(BB, false);
893 
894     // Two options: either the InVal is a phi node defined in BB or it is some
895     // value that dominates BB.
896     PHINode *InValPhi = dyn_cast<PHINode>(InVal);
897     if (InValPhi && InValPhi->getParent() == BB) {
898       // Add all of the input values of the input PHI as inputs of this phi.
899       for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
900         PN.addIncoming(InValPhi->getIncomingValue(i),
901                        InValPhi->getIncomingBlock(i));
902     } else {
903       // Otherwise, add one instance of the dominating value for each edge that
904       // we will be adding.
905       if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
906         for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
907           PN.addIncoming(InVal, BBPN->getIncomingBlock(i));
908       } else {
909         for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
910           PN.addIncoming(InVal, *PI);
911       }
912     }
913   }
914 
915   // The PHIs are now updated, change everything that refers to BB to use
916   // DestBB and remove BB.
917   BB->replaceAllUsesWith(DestBB);
918   BB->eraseFromParent();
919   ++NumBlocksElim;
920 
921   LLVM_DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
922 }
923 
924 // Computes a map of base pointer relocation instructions to corresponding
925 // derived pointer relocation instructions given a vector of all relocate calls
926 static void computeBaseDerivedRelocateMap(
927     const SmallVectorImpl<GCRelocateInst *> &AllRelocateCalls,
928     DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>>
929         &RelocateInstMap) {
930   // Collect information in two maps: one primarily for locating the base object
931   // while filling the second map; the second map is the final structure holding
932   // a mapping between Base and corresponding Derived relocate calls
933   DenseMap<std::pair<unsigned, unsigned>, GCRelocateInst *> RelocateIdxMap;
934   for (auto *ThisRelocate : AllRelocateCalls) {
935     auto K = std::make_pair(ThisRelocate->getBasePtrIndex(),
936                             ThisRelocate->getDerivedPtrIndex());
937     RelocateIdxMap.insert(std::make_pair(K, ThisRelocate));
938   }
939   for (auto &Item : RelocateIdxMap) {
940     std::pair<unsigned, unsigned> Key = Item.first;
941     if (Key.first == Key.second)
942       // Base relocation: nothing to insert
943       continue;
944 
945     GCRelocateInst *I = Item.second;
946     auto BaseKey = std::make_pair(Key.first, Key.first);
947 
948     // We're iterating over RelocateIdxMap so we cannot modify it.
949     auto MaybeBase = RelocateIdxMap.find(BaseKey);
950     if (MaybeBase == RelocateIdxMap.end())
951       // TODO: We might want to insert a new base object relocate and gep off
952       // that, if there are enough derived object relocates.
953       continue;
954 
955     RelocateInstMap[MaybeBase->second].push_back(I);
956   }
957 }
958 
959 // Accepts a GEP and extracts the operands into a vector provided they're all
960 // small integer constants
961 static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP,
962                                           SmallVectorImpl<Value *> &OffsetV) {
963   for (unsigned i = 1; i < GEP->getNumOperands(); i++) {
964     // Only accept small constant integer operands
965     auto *Op = dyn_cast<ConstantInt>(GEP->getOperand(i));
966     if (!Op || Op->getZExtValue() > 20)
967       return false;
968   }
969 
970   for (unsigned i = 1; i < GEP->getNumOperands(); i++)
971     OffsetV.push_back(GEP->getOperand(i));
972   return true;
973 }
974 
975 // Takes a RelocatedBase (base pointer relocation instruction) and Targets to
// replace, computes a replacement, and applies it.
977 static bool
978 simplifyRelocatesOffABase(GCRelocateInst *RelocatedBase,
979                           const SmallVectorImpl<GCRelocateInst *> &Targets) {
980   bool MadeChange = false;
  // We must ensure that the relocation of the derived pointer is defined after
  // the relocation of the base pointer. If we find a relocation of the same
  // base that is defined earlier than the relocation of the base, we move the
  // relocation of the base right before the found relocation. We only consider
  // relocations in the same basic block as the relocation of the base;
  // relocations from other basic blocks are skipped by this optimization and
  // we do not care about them.
987   for (auto R = RelocatedBase->getParent()->getFirstInsertionPt();
988        &*R != RelocatedBase; ++R)
989     if (auto *RI = dyn_cast<GCRelocateInst>(R))
990       if (RI->getStatepoint() == RelocatedBase->getStatepoint())
991         if (RI->getBasePtrIndex() == RelocatedBase->getBasePtrIndex()) {
992           RelocatedBase->moveBefore(RI);
993           break;
994         }
995 
996   for (GCRelocateInst *ToReplace : Targets) {
997     assert(ToReplace->getBasePtrIndex() == RelocatedBase->getBasePtrIndex() &&
998            "Not relocating a derived object of the original base object");
999     if (ToReplace->getBasePtrIndex() == ToReplace->getDerivedPtrIndex()) {
1000       // A duplicate relocate call. TODO: coalesce duplicates.
1001       continue;
1002     }
1003 
1004     if (RelocatedBase->getParent() != ToReplace->getParent()) {
1005       // Base and derived relocates are in different basic blocks.
1006       // In this case transform is only valid when base dominates derived
1007       // relocate. However it would be too expensive to check dominance
1008       // for each such relocate, so we skip the whole transformation.
1009       continue;
1010     }
1011 
1012     Value *Base = ToReplace->getBasePtr();
1013     auto *Derived = dyn_cast<GetElementPtrInst>(ToReplace->getDerivedPtr());
1014     if (!Derived || Derived->getPointerOperand() != Base)
1015       continue;
1016 
1017     SmallVector<Value *, 2> OffsetV;
1018     if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV))
1019       continue;
1020 
1021     // Create a Builder and replace the target callsite with a gep
1022     assert(RelocatedBase->getNextNode() &&
1023            "Should always have one since it's not a terminator");
1024 
1025     // Insert after RelocatedBase
1026     IRBuilder<> Builder(RelocatedBase->getNextNode());
1027     Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc());
1028 
1029     // If gc_relocate does not match the actual type, cast it to the right type.
1030     // In theory, there must be a bitcast after gc_relocate if the type does not
    // match, and we should reuse it to get the derived pointer. But there could
    // be cases like this:
1033     // bb1:
1034     //  ...
1035     //  %g1 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
1036     //  br label %merge
1037     //
1038     // bb2:
1039     //  ...
1040     //  %g2 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
1041     //  br label %merge
1042     //
1043     // merge:
1044     //  %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ]
    //  %cast = bitcast i8 addrspace(1)* %p1 to i32 addrspace(1)*
1046     //
    // In this case, we can no longer find the bitcast, so we insert a new
    // bitcast whether one already exists or not. In this way, we can handle all
    // cases, and the extra bitcast should be optimized away in later passes.
1050     Value *ActualRelocatedBase = RelocatedBase;
1051     if (RelocatedBase->getType() != Base->getType()) {
1052       ActualRelocatedBase =
1053           Builder.CreateBitCast(RelocatedBase, Base->getType());
1054     }
1055     Value *Replacement = Builder.CreateGEP(
1056         Derived->getSourceElementType(), ActualRelocatedBase, makeArrayRef(OffsetV));
1057     Replacement->takeName(ToReplace);
1058     // If the newly generated derived pointer's type does not match the original derived
1059     // pointer's type, cast the new derived pointer to match it. Same reasoning as above.
1060     Value *ActualReplacement = Replacement;
1061     if (Replacement->getType() != ToReplace->getType()) {
1062       ActualReplacement =
1063           Builder.CreateBitCast(Replacement, ToReplace->getType());
1064     }
1065     ToReplace->replaceAllUsesWith(ActualReplacement);
1066     ToReplace->eraseFromParent();
1067 
1068     MadeChange = true;
1069   }
1070   return MadeChange;
1071 }
1072 
1073 // Turns this:
1074 //
1075 // %base = ...
1076 // %ptr = gep %base + 15
1077 // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
1078 // %base' = relocate(%tok, i32 4, i32 4)
1079 // %ptr' = relocate(%tok, i32 4, i32 5)
1080 // %val = load %ptr'
1081 //
1082 // into this:
1083 //
1084 // %base = ...
1085 // %ptr = gep %base + 15
1086 // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
1087 // %base' = gc.relocate(%tok, i32 4, i32 4)
1088 // %ptr' = gep %base' + 15
1089 // %val = load %ptr'
1090 bool CodeGenPrepare::simplifyOffsetableRelocate(GCStatepointInst &I) {
1091   bool MadeChange = false;
1092   SmallVector<GCRelocateInst *, 2> AllRelocateCalls;
1093   for (auto *U : I.users())
1094     if (GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U))
1095       // Collect all the relocate calls associated with a statepoint
1096       AllRelocateCalls.push_back(Relocate);
1097 
1098   // We need at least one base pointer relocation + one derived pointer
1099   // relocation to mangle
1100   if (AllRelocateCalls.size() < 2)
1101     return false;
1102 
1103   // RelocateInstMap is a mapping from the base relocate instruction to the
1104   // corresponding derived relocate instructions
1105   DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>> RelocateInstMap;
1106   computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap);
1107   if (RelocateInstMap.empty())
1108     return false;
1109 
1110   for (auto &Item : RelocateInstMap)
1111     // Item.first is the RelocatedBase to offset against
1112     // Item.second is the vector of Targets to replace
1113     MadeChange = simplifyRelocatesOffABase(Item.first, Item.second);
1114   return MadeChange;
1115 }
1116 
1117 /// Sink the specified cast instruction into its user blocks.
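///
/// For example (an illustrative IR sketch): if
///   entry:
///     %t = trunc i64 %x to i32
///     br i1 %c, label %bb1, label %bb2
/// and %t is only used in %bb1 and %bb2, a copy of the trunc is created at the
/// first insertion point of each user block, the uses are rewritten to those
/// copies, and the now-dead original cast is erased.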
1118 static bool SinkCast(CastInst *CI) {
1119   BasicBlock *DefBB = CI->getParent();
1120 
1121   /// InsertedCasts - Only insert a cast in each block once.
1122   DenseMap<BasicBlock*, CastInst*> InsertedCasts;
1123 
1124   bool MadeChange = false;
1125   for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
1126        UI != E; ) {
1127     Use &TheUse = UI.getUse();
1128     Instruction *User = cast<Instruction>(*UI);
1129 
1130     // Figure out which BB this cast is used in.  For PHI's this is the
1131     // appropriate predecessor block.
1132     BasicBlock *UserBB = User->getParent();
1133     if (PHINode *PN = dyn_cast<PHINode>(User)) {
1134       UserBB = PN->getIncomingBlock(TheUse);
1135     }
1136 
1137     // Preincrement use iterator so we don't invalidate it.
1138     ++UI;
1139 
1140     // The first insertion point of a block containing an EH pad is after the
1141     // pad.  If the pad is the user, we cannot sink the cast past the pad.
1142     if (User->isEHPad())
1143       continue;
1144 
1145     // If the block selected to receive the cast is an EH pad that does not
1146     // allow non-PHI instructions before the terminator, we can't sink the
1147     // cast.
1148     if (UserBB->getTerminator()->isEHPad())
1149       continue;
1150 
1151     // If this user is in the same block as the cast, don't change the cast.
1152     if (UserBB == DefBB) continue;
1153 
1154     // If we have already inserted a cast into this block, use it.
1155     CastInst *&InsertedCast = InsertedCasts[UserBB];
1156 
1157     if (!InsertedCast) {
1158       BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
1159       assert(InsertPt != UserBB->end());
1160       InsertedCast = CastInst::Create(CI->getOpcode(), CI->getOperand(0),
1161                                       CI->getType(), "", &*InsertPt);
1162       InsertedCast->setDebugLoc(CI->getDebugLoc());
1163     }
1164 
1165     // Replace a use of the cast with a use of the new cast.
1166     TheUse = InsertedCast;
1167     MadeChange = true;
1168     ++NumCastUses;
1169   }
1170 
1171   // If we removed all uses, nuke the cast.
1172   if (CI->use_empty()) {
1173     salvageDebugInfo(*CI);
1174     CI->eraseFromParent();
1175     MadeChange = true;
1176   }
1177 
1178   return MadeChange;
1179 }
1180 
1181 /// If the specified cast instruction is a noop copy (e.g. it's casting from
1182 /// one pointer type to another, i32->i8 on PPC), sink it into user blocks to
1183 /// reduce the number of virtual registers that must be created and coalesced.
1184 ///
1185 /// Return true if any changes are made.
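///
/// For example (a sketch, assuming a target that promotes i8 to i32): a
/// "trunc i32 %x to i8" becomes a noop copy after type promotion, so it is
/// handed to SinkCast and duplicated into its user blocks instead of staying
/// live across blocks.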
1186 static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI,
1187                                        const DataLayout &DL) {
1188   // Sink only "cheap" (or nop) address-space casts.  This is a weaker condition
1189   // than sinking only nop casts, but is helpful on some platforms.
1190   if (auto *ASC = dyn_cast<AddrSpaceCastInst>(CI)) {
1191     if (!TLI.isFreeAddrSpaceCast(ASC->getSrcAddressSpace(),
1192                                  ASC->getDestAddressSpace()))
1193       return false;
1194   }
1195 
1196   // If this is a noop copy,
1197   EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType());
1198   EVT DstVT = TLI.getValueType(DL, CI->getType());
1199 
  // Bail out if this is an fp<->int conversion, which is not a noop.
1201   if (SrcVT.isInteger() != DstVT.isInteger())
1202     return false;
1203 
1204   // If this is an extension, it will be a zero or sign extension, which
1205   // isn't a noop.
1206   if (SrcVT.bitsLT(DstVT)) return false;
1207 
1208   // If these values will be promoted, find out what they will be promoted
1209   // to.  This helps us consider truncates on PPC as noop copies when they
1210   // are.
1211   if (TLI.getTypeAction(CI->getContext(), SrcVT) ==
1212       TargetLowering::TypePromoteInteger)
1213     SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
1214   if (TLI.getTypeAction(CI->getContext(), DstVT) ==
1215       TargetLowering::TypePromoteInteger)
1216     DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);
1217 
1218   // If, after promotion, these are the same types, this is a noop copy.
1219   if (SrcVT != DstVT)
1220     return false;
1221 
1222   return SinkCast(CI);
1223 }
1224 
1225 bool CodeGenPrepare::replaceMathCmpWithIntrinsic(BinaryOperator *BO,
1226                                                  Value *Arg0, Value *Arg1,
1227                                                  CmpInst *Cmp,
1228                                                  Intrinsic::ID IID) {
1229   if (BO->getParent() != Cmp->getParent()) {
1230     // We used to use a dominator tree here to allow multi-block optimization.
1231     // But that was problematic because:
1232     // 1. It could cause a perf regression by hoisting the math op into the
1233     //    critical path.
1234     // 2. It could cause a perf regression by creating a value that was live
1235     //    across multiple blocks and increasing register pressure.
1236     // 3. Use of a dominator tree could cause large compile-time regression.
1237     //    This is because we recompute the DT on every change in the main CGP
1238     //    run-loop. The recomputing is probably unnecessary in many cases, so if
1239     //    that was fixed, using a DT here would be ok.
1240     return false;
1241   }
1242 
1243   // We allow matching the canonical IR (add X, C) back to (usubo X, -C).
1244   if (BO->getOpcode() == Instruction::Add &&
1245       IID == Intrinsic::usub_with_overflow) {
1246     assert(isa<Constant>(Arg1) && "Unexpected input for usubo");
1247     Arg1 = ConstantExpr::getNeg(cast<Constant>(Arg1));
1248   }
1249 
1250   // Insert at the first instruction of the pair.
1251   Instruction *InsertPt = nullptr;
1252   for (Instruction &Iter : *Cmp->getParent()) {
1253     // If BO is an XOR, it is not guaranteed that it comes after both inputs to
1254     // the overflow intrinsic are defined.
1255     if ((BO->getOpcode() != Instruction::Xor && &Iter == BO) || &Iter == Cmp) {
1256       InsertPt = &Iter;
1257       break;
1258     }
1259   }
1260   assert(InsertPt != nullptr && "Parent block did not contain cmp or binop");
1261 
1262   IRBuilder<> Builder(InsertPt);
1263   Value *MathOV = Builder.CreateBinaryIntrinsic(IID, Arg0, Arg1);
1264   if (BO->getOpcode() != Instruction::Xor) {
1265     Value *Math = Builder.CreateExtractValue(MathOV, 0, "math");
1266     BO->replaceAllUsesWith(Math);
1267   } else
1268     assert(BO->hasOneUse() &&
1269            "Patterns with XOr should use the BO only in the compare");
1270   Value *OV = Builder.CreateExtractValue(MathOV, 1, "ov");
1271   Cmp->replaceAllUsesWith(OV);
1272   Cmp->eraseFromParent();
1273   BO->eraseFromParent();
1274   return true;
1275 }
1276 
1277 /// Match special-case patterns that check for unsigned add overflow.
1278 static bool matchUAddWithOverflowConstantEdgeCases(CmpInst *Cmp,
1279                                                    BinaryOperator *&Add) {
1280   // Add = add A, 1; Cmp = icmp eq A,-1 (overflow if A is max val)
1281   // Add = add A,-1; Cmp = icmp ne A, 0 (overflow if A is non-zero)
1282   Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1);
1283 
1284   // We are not expecting non-canonical/degenerate code. Just bail out.
1285   if (isa<Constant>(A))
1286     return false;
1287 
1288   ICmpInst::Predicate Pred = Cmp->getPredicate();
1289   if (Pred == ICmpInst::ICMP_EQ && match(B, m_AllOnes()))
1290     B = ConstantInt::get(B->getType(), 1);
1291   else if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt()))
1292     B = ConstantInt::get(B->getType(), -1);
1293   else
1294     return false;
1295 
1296   // Check the users of the variable operand of the compare looking for an add
1297   // with the adjusted constant.
1298   for (User *U : A->users()) {
1299     if (match(U, m_Add(m_Specific(A), m_Specific(B)))) {
1300       Add = cast<BinaryOperator>(U);
1301       return true;
1302     }
1303   }
1304   return false;
1305 }
1306 
1307 /// Try to combine the compare into a call to the llvm.uadd.with.overflow
1308 /// intrinsic. Return true if any changes were made.
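/// As an illustration (types and value names are examples only), a pattern
/// such as:
///   %add = add i32 %a, %b
///   %cmp = icmp ult i32 %add, %a
/// is rewritten to:
///   %uadd = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
///   %math = extractvalue { i32, i1 } %uadd, 0   ; replaces %add
///   %ov   = extractvalue { i32, i1 } %uadd, 1   ; replaces %cmp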
1309 bool CodeGenPrepare::combineToUAddWithOverflow(CmpInst *Cmp,
1310                                                bool &ModifiedDT) {
1311   Value *A, *B;
1312   BinaryOperator *Add;
1313   if (!match(Cmp, m_UAddWithOverflow(m_Value(A), m_Value(B), m_BinOp(Add)))) {
1314     if (!matchUAddWithOverflowConstantEdgeCases(Cmp, Add))
1315       return false;
    // matchUAddWithOverflowConstantEdgeCases matched; set A and B from the add.
1317     A = Add->getOperand(0);
1318     B = Add->getOperand(1);
1319   }
1320 
1321   if (!TLI->shouldFormOverflowOp(ISD::UADDO,
1322                                  TLI->getValueType(*DL, Add->getType()),
1323                                  Add->hasNUsesOrMore(2)))
1324     return false;
1325 
1326   // We don't want to move around uses of condition values this late, so we
1327   // check if it is legal to create the call to the intrinsic in the basic
1328   // block containing the icmp.
1329   if (Add->getParent() != Cmp->getParent() && !Add->hasOneUse())
1330     return false;
1331 
1332   if (!replaceMathCmpWithIntrinsic(Add, A, B, Cmp,
1333                                    Intrinsic::uadd_with_overflow))
1334     return false;
1335 
1336   // Reset callers - do not crash by iterating over a dead instruction.
1337   ModifiedDT = true;
1338   return true;
1339 }
1340 
1341 bool CodeGenPrepare::combineToUSubWithOverflow(CmpInst *Cmp,
1342                                                bool &ModifiedDT) {
1343   // We are not expecting non-canonical/degenerate code. Just bail out.
1344   Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1);
1345   if (isa<Constant>(A) && isa<Constant>(B))
1346     return false;
1347 
1348   // Convert (A u> B) to (A u< B) to simplify pattern matching.
1349   ICmpInst::Predicate Pred = Cmp->getPredicate();
1350   if (Pred == ICmpInst::ICMP_UGT) {
1351     std::swap(A, B);
1352     Pred = ICmpInst::ICMP_ULT;
1353   }
1354   // Convert special-case: (A == 0) is the same as (A u< 1).
1355   if (Pred == ICmpInst::ICMP_EQ && match(B, m_ZeroInt())) {
1356     B = ConstantInt::get(B->getType(), 1);
1357     Pred = ICmpInst::ICMP_ULT;
1358   }
1359   // Convert special-case: (A != 0) is the same as (0 u< A).
1360   if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt())) {
1361     std::swap(A, B);
1362     Pred = ICmpInst::ICMP_ULT;
1363   }
1364   if (Pred != ICmpInst::ICMP_ULT)
1365     return false;
1366 
  // Walk the users of the variable operand of the compare, looking for a
  // subtract or add that uses the same operand. The second operand of the
  // compare must also match the add/sub, but for an add it may appear as a
  // negated constant.
1370   Value *CmpVariableOperand = isa<Constant>(A) ? B : A;
1371   BinaryOperator *Sub = nullptr;
1372   for (User *U : CmpVariableOperand->users()) {
1373     // A - B, A u< B --> usubo(A, B)
1374     if (match(U, m_Sub(m_Specific(A), m_Specific(B)))) {
1375       Sub = cast<BinaryOperator>(U);
1376       break;
1377     }
1378 
1379     // A + (-C), A u< C (canonicalized form of (sub A, C))
1380     const APInt *CmpC, *AddC;
1381     if (match(U, m_Add(m_Specific(A), m_APInt(AddC))) &&
1382         match(B, m_APInt(CmpC)) && *AddC == -(*CmpC)) {
1383       Sub = cast<BinaryOperator>(U);
1384       break;
1385     }
1386   }
1387   if (!Sub)
1388     return false;
1389 
1390   if (!TLI->shouldFormOverflowOp(ISD::USUBO,
1391                                  TLI->getValueType(*DL, Sub->getType()),
1392                                  Sub->hasNUsesOrMore(2)))
1393     return false;
1394 
1395   if (!replaceMathCmpWithIntrinsic(Sub, Sub->getOperand(0), Sub->getOperand(1),
1396                                    Cmp, Intrinsic::usub_with_overflow))
1397     return false;
1398 
1399   // Reset callers - do not crash by iterating over a dead instruction.
1400   ModifiedDT = true;
1401   return true;
1402 }
1403 
1404 /// Sink the given CmpInst into user blocks to reduce the number of virtual
1405 /// registers that must be created and coalesced. This is a clear win except on
1406 /// targets with multiple condition code registers (PowerPC), where it might
1407 /// lose; some adjustment may be wanted there.
1408 ///
1409 /// Return true if any changes are made.
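/// For instance (block and value names are illustrative), a compare defined in
/// one block and used only in another:
///   bb0:
///     %cmp = icmp eq i32 %x, %y
///     br label %bb1
///   bb1:
///     br i1 %cmp, label %t, label %f
/// is re-created in %bb1 so the flag-producing compare sits next to its user,
/// and the original compare is erased once it has no remaining uses.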
1410 static bool sinkCmpExpression(CmpInst *Cmp, const TargetLowering &TLI) {
1411   if (TLI.hasMultipleConditionRegisters())
1412     return false;
1413 
1414   // Avoid sinking soft-FP comparisons, since this can move them into a loop.
1415   if (TLI.useSoftFloat() && isa<FCmpInst>(Cmp))
1416     return false;
1417 
1418   // Only insert a cmp in each block once.
1419   DenseMap<BasicBlock*, CmpInst*> InsertedCmps;
1420 
1421   bool MadeChange = false;
1422   for (Value::user_iterator UI = Cmp->user_begin(), E = Cmp->user_end();
1423        UI != E; ) {
1424     Use &TheUse = UI.getUse();
1425     Instruction *User = cast<Instruction>(*UI);
1426 
1427     // Preincrement use iterator so we don't invalidate it.
1428     ++UI;
1429 
1430     // Don't bother for PHI nodes.
1431     if (isa<PHINode>(User))
1432       continue;
1433 
1434     // Figure out which BB this cmp is used in.
1435     BasicBlock *UserBB = User->getParent();
1436     BasicBlock *DefBB = Cmp->getParent();
1437 
1438     // If this user is in the same block as the cmp, don't change the cmp.
1439     if (UserBB == DefBB) continue;
1440 
1441     // If we have already inserted a cmp into this block, use it.
1442     CmpInst *&InsertedCmp = InsertedCmps[UserBB];
1443 
1444     if (!InsertedCmp) {
1445       BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
1446       assert(InsertPt != UserBB->end());
1447       InsertedCmp =
1448           CmpInst::Create(Cmp->getOpcode(), Cmp->getPredicate(),
1449                           Cmp->getOperand(0), Cmp->getOperand(1), "",
1450                           &*InsertPt);
1451       // Propagate the debug info.
1452       InsertedCmp->setDebugLoc(Cmp->getDebugLoc());
1453     }
1454 
1455     // Replace a use of the cmp with a use of the new cmp.
1456     TheUse = InsertedCmp;
1457     MadeChange = true;
1458     ++NumCmpUses;
1459   }
1460 
1461   // If we removed all uses, nuke the cmp.
1462   if (Cmp->use_empty()) {
1463     Cmp->eraseFromParent();
1464     MadeChange = true;
1465   }
1466 
1467   return MadeChange;
1468 }
1469 
/// For a pattern like:
1471 ///
1472 ///   DomCond = icmp sgt/slt CmpOp0, CmpOp1 (might not be in DomBB)
1473 ///   ...
1474 /// DomBB:
1475 ///   ...
1476 ///   br DomCond, TrueBB, CmpBB
1477 /// CmpBB: (with DomBB being the single predecessor)
1478 ///   ...
1479 ///   Cmp = icmp eq CmpOp0, CmpOp1
1480 ///   ...
1481 ///
/// This would use two comparisons on targets where the lowering of icmp
/// sgt/slt differs from the lowering of icmp eq (PowerPC). This function tries
/// to convert 'Cmp = icmp eq CmpOp0, CmpOp1' into 'Cmp = icmp slt/sgt CmpOp0,
/// CmpOp1'. After that, DomCond and Cmp can share the same comparison, saving
/// one comparison.
1487 ///
1488 /// Return true if any changes are made.
1489 static bool foldICmpWithDominatingICmp(CmpInst *Cmp,
1490                                        const TargetLowering &TLI) {
1491   if (!EnableICMP_EQToICMP_ST && TLI.isEqualityCmpFoldedWithSignedCmp())
1492     return false;
1493 
1494   ICmpInst::Predicate Pred = Cmp->getPredicate();
1495   if (Pred != ICmpInst::ICMP_EQ)
1496     return false;
1497 
1498   // If icmp eq has users other than BranchInst and SelectInst, converting it to
1499   // icmp slt/sgt would introduce more redundant LLVM IR.
1500   for (User *U : Cmp->users()) {
1501     if (isa<BranchInst>(U))
1502       continue;
1503     if (isa<SelectInst>(U) && cast<SelectInst>(U)->getCondition() == Cmp)
1504       continue;
1505     return false;
1506   }
1507 
1508   // This is a cheap/incomplete check for dominance - just match a single
1509   // predecessor with a conditional branch.
1510   BasicBlock *CmpBB = Cmp->getParent();
1511   BasicBlock *DomBB = CmpBB->getSinglePredecessor();
1512   if (!DomBB)
1513     return false;
1514 
1515   // We want to ensure that the only way control gets to the comparison of
1516   // interest is that a less/greater than comparison on the same operands is
1517   // false.
1518   Value *DomCond;
1519   BasicBlock *TrueBB, *FalseBB;
1520   if (!match(DomBB->getTerminator(), m_Br(m_Value(DomCond), TrueBB, FalseBB)))
1521     return false;
1522   if (CmpBB != FalseBB)
1523     return false;
1524 
1525   Value *CmpOp0 = Cmp->getOperand(0), *CmpOp1 = Cmp->getOperand(1);
1526   ICmpInst::Predicate DomPred;
1527   if (!match(DomCond, m_ICmp(DomPred, m_Specific(CmpOp0), m_Specific(CmpOp1))))
1528     return false;
1529   if (DomPred != ICmpInst::ICMP_SGT && DomPred != ICmpInst::ICMP_SLT)
1530     return false;
1531 
1532   // Convert the equality comparison to the opposite of the dominating
1533   // comparison and swap the direction for all branch/select users.
1534   // We have conceptually converted:
1535   // Res = (a < b) ? <LT_RES> : (a == b) ? <EQ_RES> : <GT_RES>;
1536   // to
1537   // Res = (a < b) ? <LT_RES> : (a > b)  ? <GT_RES> : <EQ_RES>;
1538   // And similarly for branches.
1539   for (User *U : Cmp->users()) {
1540     if (auto *BI = dyn_cast<BranchInst>(U)) {
1541       assert(BI->isConditional() && "Must be conditional");
1542       BI->swapSuccessors();
1543       continue;
1544     }
1545     if (auto *SI = dyn_cast<SelectInst>(U)) {
1546       // Swap operands
1547       SI->swapValues();
1548       SI->swapProfMetadata();
1549       continue;
1550     }
1551     llvm_unreachable("Must be a branch or a select");
1552   }
1553   Cmp->setPredicate(CmpInst::getSwappedPredicate(DomPred));
1554   return true;
1555 }
1556 
1557 bool CodeGenPrepare::optimizeCmp(CmpInst *Cmp, bool &ModifiedDT) {
1558   if (sinkCmpExpression(Cmp, *TLI))
1559     return true;
1560 
1561   if (combineToUAddWithOverflow(Cmp, ModifiedDT))
1562     return true;
1563 
1564   if (combineToUSubWithOverflow(Cmp, ModifiedDT))
1565     return true;
1566 
1567   if (foldICmpWithDominatingICmp(Cmp, *TLI))
1568     return true;
1569 
1570   return false;
1571 }
1572 
1573 /// Duplicate and sink the given 'and' instruction into user blocks where it is
1574 /// used in a compare to allow isel to generate better code for targets where
1575 /// this operation can be combined.
1576 ///
1577 /// Return true if any changes are made.
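/// A sketch of the effect (illustrative IR):
///   bb0:
///     %and = and i32 %x, 255
///     br label %bb1
///   bb1:
///     %cmp = icmp eq i32 %and, 0
/// becomes a duplicated 'and' placed in %bb1 right before the icmp, so isel
/// sees the mask and the compare-with-zero together in one block.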
1578 static bool sinkAndCmp0Expression(Instruction *AndI,
1579                                   const TargetLowering &TLI,
1580                                   SetOfInstrs &InsertedInsts) {
1581   // Double-check that we're not trying to optimize an instruction that was
1582   // already optimized by some other part of this pass.
1583   assert(!InsertedInsts.count(AndI) &&
1584          "Attempting to optimize already optimized and instruction");
1585   (void) InsertedInsts;
1586 
1587   // Nothing to do for single use in same basic block.
1588   if (AndI->hasOneUse() &&
1589       AndI->getParent() == cast<Instruction>(*AndI->user_begin())->getParent())
1590     return false;
1591 
1592   // Try to avoid cases where sinking/duplicating is likely to increase register
1593   // pressure.
1594   if (!isa<ConstantInt>(AndI->getOperand(0)) &&
1595       !isa<ConstantInt>(AndI->getOperand(1)) &&
1596       AndI->getOperand(0)->hasOneUse() && AndI->getOperand(1)->hasOneUse())
1597     return false;
1598 
1599   for (auto *U : AndI->users()) {
1600     Instruction *User = cast<Instruction>(U);
1601 
1602     // Only sink 'and' feeding icmp with 0.
1603     if (!isa<ICmpInst>(User))
1604       return false;
1605 
1606     auto *CmpC = dyn_cast<ConstantInt>(User->getOperand(1));
1607     if (!CmpC || !CmpC->isZero())
1608       return false;
1609   }
1610 
1611   if (!TLI.isMaskAndCmp0FoldingBeneficial(*AndI))
1612     return false;
1613 
1614   LLVM_DEBUG(dbgs() << "found 'and' feeding only icmp 0;\n");
1615   LLVM_DEBUG(AndI->getParent()->dump());
1616 
1617   // Push the 'and' into the same block as the icmp 0.  There should only be
1618   // one (icmp (and, 0)) in each block, since CSE/GVN should have removed any
1619   // others, so we don't need to keep track of which BBs we insert into.
1620   for (Value::user_iterator UI = AndI->user_begin(), E = AndI->user_end();
1621        UI != E; ) {
1622     Use &TheUse = UI.getUse();
1623     Instruction *User = cast<Instruction>(*UI);
1624 
1625     // Preincrement use iterator so we don't invalidate it.
1626     ++UI;
1627 
1628     LLVM_DEBUG(dbgs() << "sinking 'and' use: " << *User << "\n");
1629 
1630     // Keep the 'and' in the same place if the use is already in the same block.
1631     Instruction *InsertPt =
1632         User->getParent() == AndI->getParent() ? AndI : User;
1633     Instruction *InsertedAnd =
1634         BinaryOperator::Create(Instruction::And, AndI->getOperand(0),
1635                                AndI->getOperand(1), "", InsertPt);
1636     // Propagate the debug info.
1637     InsertedAnd->setDebugLoc(AndI->getDebugLoc());
1638 
1639     // Replace a use of the 'and' with a use of the new 'and'.
1640     TheUse = InsertedAnd;
1641     ++NumAndUses;
1642     LLVM_DEBUG(User->getParent()->dump());
1643   }
1644 
1645   // We removed all uses, nuke the and.
1646   AndI->eraseFromParent();
1647   return true;
1648 }
1649 
/// Check whether this user instruction could be combined with a shift
/// instruction to extract bits. Valid candidates are:
/// 1. A truncate instruction.
/// 2. An 'and' instruction whose immediate is a mask of the low bits, i.e.
///    imm & (imm+1) == 0.
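/// For example, imm = 0x00FF is a valid low-bit mask (0x00FF & 0x0100 == 0),
/// while imm = 0x00F0 is not (0x00F0 & 0x00F1 != 0).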
1655 static bool isExtractBitsCandidateUse(Instruction *User) {
1656   if (!isa<TruncInst>(User)) {
1657     if (User->getOpcode() != Instruction::And ||
1658         !isa<ConstantInt>(User->getOperand(1)))
1659       return false;
1660 
1661     const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue();
1662 
1663     if ((Cimm & (Cimm + 1)).getBoolValue())
1664       return false;
1665   }
1666   return true;
1667 }
1668 
/// Sink both the shift and the truncate instructions into the blocks of the
/// truncate's users.
1670 static bool
1671 SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI,
1672                      DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts,
1673                      const TargetLowering &TLI, const DataLayout &DL) {
1674   BasicBlock *UserBB = User->getParent();
1675   DenseMap<BasicBlock *, CastInst *> InsertedTruncs;
1676   auto *TruncI = cast<TruncInst>(User);
1677   bool MadeChange = false;
1678 
1679   for (Value::user_iterator TruncUI = TruncI->user_begin(),
1680                             TruncE = TruncI->user_end();
1681        TruncUI != TruncE;) {
1682 
1683     Use &TruncTheUse = TruncUI.getUse();
1684     Instruction *TruncUser = cast<Instruction>(*TruncUI);
1685     // Preincrement use iterator so we don't invalidate it.
1686 
1687     ++TruncUI;
1688 
1689     int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode());
1690     if (!ISDOpcode)
1691       continue;
1692 
1693     // If the use is actually a legal node, there will not be an
1694     // implicit truncate.
1695     // FIXME: always querying the result type is just an
1696     // approximation; some nodes' legality is determined by the
1697     // operand or other means. There's no good way to find out though.
1698     if (TLI.isOperationLegalOrCustom(
1699             ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true)))
1700       continue;
1701 
1702     // Don't bother for PHI nodes.
1703     if (isa<PHINode>(TruncUser))
1704       continue;
1705 
1706     BasicBlock *TruncUserBB = TruncUser->getParent();
1707 
1708     if (UserBB == TruncUserBB)
1709       continue;
1710 
1711     BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB];
1712     CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB];
1713 
1714     if (!InsertedShift && !InsertedTrunc) {
1715       BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt();
1716       assert(InsertPt != TruncUserBB->end());
1717       // Sink the shift
1718       if (ShiftI->getOpcode() == Instruction::AShr)
1719         InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI,
1720                                                    "", &*InsertPt);
1721       else
1722         InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI,
1723                                                    "", &*InsertPt);
1724       InsertedShift->setDebugLoc(ShiftI->getDebugLoc());
1725 
1726       // Sink the trunc
1727       BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt();
1728       TruncInsertPt++;
1729       assert(TruncInsertPt != TruncUserBB->end());
1730 
1731       InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift,
1732                                        TruncI->getType(), "", &*TruncInsertPt);
1733       InsertedTrunc->setDebugLoc(TruncI->getDebugLoc());
1734 
1735       MadeChange = true;
1736 
1737       TruncTheUse = InsertedTrunc;
1738     }
1739   }
1740   return MadeChange;
1741 }
1742 
/// Sink the shift *right* instruction into user blocks if the uses could
/// potentially be combined with this shift instruction to generate a
/// BitExtract instruction. This is only applied if the target supports a
/// BitExtract instruction. Here is an example:
1747 /// BB1:
1748 ///   %x.extract.shift = lshr i64 %arg1, 32
1749 /// BB2:
1750 ///   %x.extract.trunc = trunc i64 %x.extract.shift to i16
1751 /// ==>
1752 ///
1753 /// BB2:
1754 ///   %x.extract.shift.1 = lshr i64 %arg1, 32
1755 ///   %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16
1756 ///
1757 /// CodeGen will recognize the pattern in BB2 and generate BitExtract
1758 /// instruction.
1759 /// Return true if any changes are made.
1760 static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI,
1761                                 const TargetLowering &TLI,
1762                                 const DataLayout &DL) {
1763   BasicBlock *DefBB = ShiftI->getParent();
1764 
1765   /// Only insert instructions in each block once.
1766   DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts;
1767 
1768   bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType()));
1769 
1770   bool MadeChange = false;
1771   for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end();
1772        UI != E;) {
1773     Use &TheUse = UI.getUse();
1774     Instruction *User = cast<Instruction>(*UI);
1775     // Preincrement use iterator so we don't invalidate it.
1776     ++UI;
1777 
1778     // Don't bother for PHI nodes.
1779     if (isa<PHINode>(User))
1780       continue;
1781 
1782     if (!isExtractBitsCandidateUse(User))
1783       continue;
1784 
1785     BasicBlock *UserBB = User->getParent();
1786 
1787     if (UserBB == DefBB) {
      // If the shift and truncate instructions are in the same BB, the use of
      // the truncate (TruncUse) may still introduce another truncate if its
      // type is not legal. In this case, we would like to sink both the shift
      // and the truncate into the BB of TruncUse.
1792       // for example:
1793       // BB1:
1794       // i64 shift.result = lshr i64 opnd, imm
1795       // trunc.result = trunc shift.result to i16
1796       //
1797       // BB2:
1798       //   ----> We will have an implicit truncate here if the architecture does
1799       //   not have i16 compare.
1800       // cmp i16 trunc.result, opnd2
1801       //
      // If the type of the truncate is legal, no truncate will be
      // introduced in other basic blocks.
      if (isa<TruncInst>(User) && shiftIsLegal &&
          !TLI.isTypeLegal(TLI.getValueType(DL, User->getType())))
1807         MadeChange =
1808             SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL);
1809 
1810       continue;
1811     }
1812     // If we have already inserted a shift into this block, use it.
1813     BinaryOperator *&InsertedShift = InsertedShifts[UserBB];
1814 
1815     if (!InsertedShift) {
1816       BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
1817       assert(InsertPt != UserBB->end());
1818 
1819       if (ShiftI->getOpcode() == Instruction::AShr)
1820         InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI,
1821                                                    "", &*InsertPt);
1822       else
1823         InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI,
1824                                                    "", &*InsertPt);
1825       InsertedShift->setDebugLoc(ShiftI->getDebugLoc());
1826 
1827       MadeChange = true;
1828     }
1829 
1830     // Replace a use of the shift with a use of the new shift.
1831     TheUse = InsertedShift;
1832   }
1833 
1834   // If we removed all uses, or there are none, nuke the shift.
1835   if (ShiftI->use_empty()) {
1836     salvageDebugInfo(*ShiftI);
1837     ShiftI->eraseFromParent();
1838     MadeChange = true;
1839   }
1840 
1841   return MadeChange;
1842 }
1843 
1844 /// If counting leading or trailing zeros is an expensive operation and a zero
1845 /// input is defined, add a check for zero to avoid calling the intrinsic.
1846 ///
1847 /// We want to transform:
1848 ///     %z = call i64 @llvm.cttz.i64(i64 %A, i1 false)
1849 ///
1850 /// into:
1851 ///   entry:
1852 ///     %cmpz = icmp eq i64 %A, 0
1853 ///     br i1 %cmpz, label %cond.end, label %cond.false
1854 ///   cond.false:
1855 ///     %z = call i64 @llvm.cttz.i64(i64 %A, i1 true)
1856 ///     br label %cond.end
1857 ///   cond.end:
1858 ///     %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ]
1859 ///
1860 /// If the transform is performed, return true and set ModifiedDT to true.
1861 static bool despeculateCountZeros(IntrinsicInst *CountZeros,
1862                                   const TargetLowering *TLI,
1863                                   const DataLayout *DL,
1864                                   bool &ModifiedDT) {
1865   // If a zero input is undefined, it doesn't make sense to despeculate that.
1866   if (match(CountZeros->getOperand(1), m_One()))
1867     return false;
1868 
1869   // If it's cheap to speculate, there's nothing to do.
1870   auto IntrinsicID = CountZeros->getIntrinsicID();
1871   if ((IntrinsicID == Intrinsic::cttz && TLI->isCheapToSpeculateCttz()) ||
1872       (IntrinsicID == Intrinsic::ctlz && TLI->isCheapToSpeculateCtlz()))
1873     return false;
1874 
1875   // Only handle legal scalar cases. Anything else requires too much work.
1876   Type *Ty = CountZeros->getType();
1877   unsigned SizeInBits = Ty->getPrimitiveSizeInBits();
1878   if (Ty->isVectorTy() || SizeInBits > DL->getLargestLegalIntTypeSizeInBits())
1879     return false;
1880 
1881   // The intrinsic will be sunk behind a compare against zero and branch.
1882   BasicBlock *StartBlock = CountZeros->getParent();
1883   BasicBlock *CallBlock = StartBlock->splitBasicBlock(CountZeros, "cond.false");
1884 
1885   // Create another block after the count zero intrinsic. A PHI will be added
1886   // in this block to select the result of the intrinsic or the bit-width
1887   // constant if the input to the intrinsic is zero.
1888   BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(CountZeros));
1889   BasicBlock *EndBlock = CallBlock->splitBasicBlock(SplitPt, "cond.end");
1890 
1891   // Set up a builder to create a compare, conditional branch, and PHI.
1892   IRBuilder<> Builder(CountZeros->getContext());
1893   Builder.SetInsertPoint(StartBlock->getTerminator());
1894   Builder.SetCurrentDebugLocation(CountZeros->getDebugLoc());
1895 
1896   // Replace the unconditional branch that was created by the first split with
1897   // a compare against zero and a conditional branch.
1898   Value *Zero = Constant::getNullValue(Ty);
1899   Value *Cmp = Builder.CreateICmpEQ(CountZeros->getOperand(0), Zero, "cmpz");
1900   Builder.CreateCondBr(Cmp, EndBlock, CallBlock);
1901   StartBlock->getTerminator()->eraseFromParent();
1902 
1903   // Create a PHI in the end block to select either the output of the intrinsic
1904   // or the bit width of the operand.
1905   Builder.SetInsertPoint(&EndBlock->front());
1906   PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz");
1907   CountZeros->replaceAllUsesWith(PN);
1908   Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits));
1909   PN->addIncoming(BitWidth, StartBlock);
1910   PN->addIncoming(CountZeros, CallBlock);
1911 
1912   // We are explicitly handling the zero case, so we can set the intrinsic's
1913   // undefined zero argument to 'true'. This will also prevent reprocessing the
1914   // intrinsic; we only despeculate when a zero input is defined.
1915   CountZeros->setArgOperand(1, Builder.getTrue());
1916   ModifiedDT = true;
1917   return true;
1918 }
1919 
1920 bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) {
1921   BasicBlock *BB = CI->getParent();
1922 
1923   // Lower inline assembly if we can.
  // If we found an inline asm expression, and if the target knows how to
1925   // lower it to normal LLVM code, do so now.
1926   if (CI->isInlineAsm()) {
1927     if (TLI->ExpandInlineAsm(CI)) {
1928       // Avoid invalidating the iterator.
1929       CurInstIterator = BB->begin();
1930       // Avoid processing instructions out of order, which could cause
1931       // reuse before a value is defined.
1932       SunkAddrs.clear();
1933       return true;
1934     }
1935     // Sink address computing for memory operands into the block.
1936     if (optimizeInlineAsmInst(CI))
1937       return true;
1938   }
1939 
1940   // Align the pointer arguments to this call if the target thinks it's a good
  // idea.
1942   unsigned MinSize, PrefAlign;
1943   if (TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
1944     for (auto &Arg : CI->arg_operands()) {
1945       // We want to align both objects whose address is used directly and
1946       // objects whose address is used in casts and GEPs, though it only makes
1947       // sense for GEPs if the offset is a multiple of the desired alignment and
1948       // if size - offset meets the size threshold.
1949       if (!Arg->getType()->isPointerTy())
1950         continue;
1951       APInt Offset(DL->getIndexSizeInBits(
1952                        cast<PointerType>(Arg->getType())->getAddressSpace()),
1953                    0);
1954       Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset);
1955       uint64_t Offset2 = Offset.getLimitedValue();
1956       if ((Offset2 & (PrefAlign-1)) != 0)
1957         continue;
1958       AllocaInst *AI;
1959       if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlignment() < PrefAlign &&
1960           DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2)
1961         AI->setAlignment(Align(PrefAlign));
1962       // Global variables can only be aligned if they are defined in this
1963       // object (i.e. they are uniquely initialized in this object), and
1964       // over-aligning global variables that have an explicit section is
1965       // forbidden.
1966       GlobalVariable *GV;
1967       if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->canIncreaseAlignment() &&
1968           GV->getPointerAlignment(*DL) < PrefAlign &&
1969           DL->getTypeAllocSize(GV->getValueType()) >=
1970               MinSize + Offset2)
1971         GV->setAlignment(MaybeAlign(PrefAlign));
1972     }
1973     // If this is a memcpy (or similar) then we may be able to improve the
    // alignment.
1975     if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) {
1976       Align DestAlign = getKnownAlignment(MI->getDest(), *DL);
1977       MaybeAlign MIDestAlign = MI->getDestAlign();
1978       if (!MIDestAlign || DestAlign > *MIDestAlign)
1979         MI->setDestAlignment(DestAlign);
1980       if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
1981         MaybeAlign MTISrcAlign = MTI->getSourceAlign();
1982         Align SrcAlign = getKnownAlignment(MTI->getSource(), *DL);
1983         if (!MTISrcAlign || SrcAlign > *MTISrcAlign)
1984           MTI->setSourceAlignment(SrcAlign);
1985       }
1986     }
1987   }
1988 
1989   // If we have a cold call site, try to sink addressing computation into the
1990   // cold block.  This interacts with our handling for loads and stores to
1991   // ensure that we can fold all uses of a potential addressing computation
1992   // into their uses.  TODO: generalize this to work over profiling data
1993   if (CI->hasFnAttr(Attribute::Cold) &&
1994       !OptSize && !llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
1995     for (auto &Arg : CI->arg_operands()) {
1996       if (!Arg->getType()->isPointerTy())
1997         continue;
1998       unsigned AS = Arg->getType()->getPointerAddressSpace();
1999       return optimizeMemoryInst(CI, Arg, Arg->getType(), AS);
2000     }
2001 
2002   IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
2003   if (II) {
2004     switch (II->getIntrinsicID()) {
2005     default: break;
2006     case Intrinsic::assume: {
2007       II->eraseFromParent();
2008       return true;
2009     }
2010 
2011     case Intrinsic::experimental_widenable_condition: {
      // Give up on future widening opportunities so that we can fold away dead
2013       // paths and merge blocks before going into block-local instruction
2014       // selection.
2015       if (II->use_empty()) {
2016         II->eraseFromParent();
2017         return true;
2018       }
2019       Constant *RetVal = ConstantInt::getTrue(II->getContext());
2020       resetIteratorIfInvalidatedWhileCalling(BB, [&]() {
2021         replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr);
2022       });
2023       return true;
2024     }
2025     case Intrinsic::objectsize:
2026       llvm_unreachable("llvm.objectsize.* should have been lowered already");
2027     case Intrinsic::is_constant:
2028       llvm_unreachable("llvm.is.constant.* should have been lowered already");
2029     case Intrinsic::aarch64_stlxr:
2030     case Intrinsic::aarch64_stxr: {
2031       ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0));
2032       if (!ExtVal || !ExtVal->hasOneUse() ||
2033           ExtVal->getParent() == CI->getParent())
2034         return false;
2035       // Sink a zext feeding stlxr/stxr before it, so it can be folded into it.
2036       ExtVal->moveBefore(CI);
2037       // Mark this instruction as "inserted by CGP", so that other
2038       // optimizations don't touch it.
2039       InsertedInsts.insert(ExtVal);
2040       return true;
2041     }
2042 
2043     case Intrinsic::launder_invariant_group:
2044     case Intrinsic::strip_invariant_group: {
2045       Value *ArgVal = II->getArgOperand(0);
2046       auto it = LargeOffsetGEPMap.find(II);
2047       if (it != LargeOffsetGEPMap.end()) {
        // Merge entries in LargeOffsetGEPMap to reflect the RAUW.
        // Make sure not to have to deal with iterator invalidation
        // after possibly adding ArgVal to LargeOffsetGEPMap.
        auto GEPs = std::move(it->second);
        LargeOffsetGEPMap[ArgVal].append(GEPs.begin(), GEPs.end());
        LargeOffsetGEPMap.erase(II);
2054       }
2055 
2056       II->replaceAllUsesWith(ArgVal);
2057       II->eraseFromParent();
2058       return true;
2059     }
2060     case Intrinsic::cttz:
2061     case Intrinsic::ctlz:
2062       // If counting zeros is expensive, try to avoid it.
2063       return despeculateCountZeros(II, TLI, DL, ModifiedDT);
2064     case Intrinsic::fshl:
2065     case Intrinsic::fshr:
2066       return optimizeFunnelShift(II);
2067     case Intrinsic::dbg_value:
2068       return fixupDbgValue(II);
2069     case Intrinsic::vscale: {
2070       // If datalayout has no special restrictions on vector data layout,
2071       // replace `llvm.vscale` by an equivalent constant expression
2072       // to benefit from cheap constant propagation.
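      // Schematically (assuming the datalayout check below holds), the call
      //   %vs = call i64 @llvm.vscale.i64()
      // is replaced by the constant expression
      //   ptrtoint (<vscale x 1 x i8>* getelementptr (<vscale x 1 x i8>,
      //             <vscale x 1 x i8>* null, i64 1) to i64)
      // i.e. the classic size-of idiom over a single scalable vector.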
2073       Type *ScalableVectorTy =
2074           VectorType::get(Type::getInt8Ty(II->getContext()), 1, true);
2075       if (DL->getTypeAllocSize(ScalableVectorTy).getKnownMinSize() == 8) {
2076         auto *Null = Constant::getNullValue(ScalableVectorTy->getPointerTo());
2077         auto *One = ConstantInt::getSigned(II->getType(), 1);
2078         auto *CGep =
2079             ConstantExpr::getGetElementPtr(ScalableVectorTy, Null, One);
2080         II->replaceAllUsesWith(ConstantExpr::getPtrToInt(CGep, II->getType()));
2081         II->eraseFromParent();
2082         return true;
2083       }
2084       break;
2085     }
2086     case Intrinsic::masked_gather:
2087       return optimizeGatherScatterInst(II, II->getArgOperand(0));
2088     case Intrinsic::masked_scatter:
2089       return optimizeGatherScatterInst(II, II->getArgOperand(1));
2090     }
2091 
2092     SmallVector<Value *, 2> PtrOps;
2093     Type *AccessTy;
2094     if (TLI->getAddrModeArguments(II, PtrOps, AccessTy))
2095       while (!PtrOps.empty()) {
2096         Value *PtrVal = PtrOps.pop_back_val();
2097         unsigned AS = PtrVal->getType()->getPointerAddressSpace();
2098         if (optimizeMemoryInst(II, PtrVal, AccessTy, AS))
2099           return true;
2100       }
2101   }
2102 
2103   // From here on out we're working with named functions.
2104   if (!CI->getCalledFunction()) return false;
2105 
2106   // Lower all default uses of _chk calls.  This is very similar
2107   // to what InstCombineCalls does, but here we are only lowering calls
2108   // to fortified library functions (e.g. __memcpy_chk) that have the default
2109   // "don't know" as the objectsize.  Anything else should be left alone.
2110   FortifiedLibCallSimplifier Simplifier(TLInfo, true);
2111   IRBuilder<> Builder(CI);
2112   if (Value *V = Simplifier.optimizeCall(CI, Builder)) {
2113     CI->replaceAllUsesWith(V);
2114     CI->eraseFromParent();
2115     return true;
2116   }
2117 
2118   return false;
2119 }
2120 
2121 /// Look for opportunities to duplicate return instructions to the predecessor
2122 /// to enable tail call optimizations. The case it is currently looking for is:
2123 /// @code
2124 /// bb0:
2125 ///   %tmp0 = tail call i32 @f0()
2126 ///   br label %return
2127 /// bb1:
2128 ///   %tmp1 = tail call i32 @f1()
2129 ///   br label %return
2130 /// bb2:
2131 ///   %tmp2 = tail call i32 @f2()
2132 ///   br label %return
2133 /// return:
2134 ///   %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
2135 ///   ret i32 %retval
2136 /// @endcode
2137 ///
2138 /// =>
2139 ///
2140 /// @code
2141 /// bb0:
2142 ///   %tmp0 = tail call i32 @f0()
2143 ///   ret i32 %tmp0
2144 /// bb1:
2145 ///   %tmp1 = tail call i32 @f1()
2146 ///   ret i32 %tmp1
2147 /// bb2:
2148 ///   %tmp2 = tail call i32 @f2()
2149 ///   ret i32 %tmp2
2150 /// @endcode
2151 bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB, bool &ModifiedDT) {
2152   ReturnInst *RetI = dyn_cast<ReturnInst>(BB->getTerminator());
2153   if (!RetI)
2154     return false;
2155 
2156   PHINode *PN = nullptr;
2157   ExtractValueInst *EVI = nullptr;
2158   BitCastInst *BCI = nullptr;
2159   Value *V = RetI->getReturnValue();
2160   if (V) {
2161     BCI = dyn_cast<BitCastInst>(V);
2162     if (BCI)
2163       V = BCI->getOperand(0);
2164 
2165     EVI = dyn_cast<ExtractValueInst>(V);
2166     if (EVI) {
2167       V = EVI->getOperand(0);
2168       if (!std::all_of(EVI->idx_begin(), EVI->idx_end(),
2169                        [](unsigned idx) { return idx == 0; }))
2170         return false;
2171     }
2172 
2173     PN = dyn_cast<PHINode>(V);
2174     if (!PN)
2175       return false;
2176   }
2177 
2178   if (PN && PN->getParent() != BB)
2179     return false;
2180 
2181   // Make sure there are no instructions between the PHI and return, or that the
2182   // return is the first instruction in the block.
2183   if (PN) {
2184     BasicBlock::iterator BI = BB->begin();
2185     // Skip over debug and the bitcast.
2186     do {
2187       ++BI;
2188     } while (isa<DbgInfoIntrinsic>(BI) || &*BI == BCI || &*BI == EVI);
2189     if (&*BI != RetI)
2190       return false;
2191   } else {
2192     BasicBlock::iterator BI = BB->begin();
2193     while (isa<DbgInfoIntrinsic>(BI)) ++BI;
2194     if (&*BI != RetI)
2195       return false;
2196   }
2197 
2198   /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail
2199   /// call.
2200   const Function *F = BB->getParent();
2201   SmallVector<BasicBlock*, 4> TailCallBBs;
2202   if (PN) {
2203     for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) {
2204       // Look through bitcasts.
2205       Value *IncomingVal = PN->getIncomingValue(I)->stripPointerCasts();
2206       CallInst *CI = dyn_cast<CallInst>(IncomingVal);
2207       BasicBlock *PredBB = PN->getIncomingBlock(I);
2208       // Make sure the phi value is indeed produced by the tail call.
2209       if (CI && CI->hasOneUse() && CI->getParent() == PredBB &&
2210           TLI->mayBeEmittedAsTailCall(CI) &&
2211           attributesPermitTailCall(F, CI, RetI, *TLI))
2212         TailCallBBs.push_back(PredBB);
2213     }
2214   } else {
2215     SmallPtrSet<BasicBlock*, 4> VisitedBBs;
2216     for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) {
2217       if (!VisitedBBs.insert(*PI).second)
2218         continue;
2219 
2220       BasicBlock::InstListType &InstList = (*PI)->getInstList();
2221       BasicBlock::InstListType::reverse_iterator RI = InstList.rbegin();
2222       BasicBlock::InstListType::reverse_iterator RE = InstList.rend();
2223       do { ++RI; } while (RI != RE && isa<DbgInfoIntrinsic>(&*RI));
2224       if (RI == RE)
2225         continue;
2226 
2227       CallInst *CI = dyn_cast<CallInst>(&*RI);
2228       if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI) &&
2229           attributesPermitTailCall(F, CI, RetI, *TLI))
2230         TailCallBBs.push_back(*PI);
2231     }
2232   }
2233 
2234   bool Changed = false;
2235   for (auto const &TailCallBB : TailCallBBs) {
2236     // Make sure the call instruction is followed by an unconditional branch to
2237     // the return block.
2238     BranchInst *BI = dyn_cast<BranchInst>(TailCallBB->getTerminator());
2239     if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB)
2240       continue;
2241 
2242     // Duplicate the return into TailCallBB.
2243     (void)FoldReturnIntoUncondBranch(RetI, BB, TailCallBB);
2244     assert(!VerifyBFIUpdates ||
2245            BFI->getBlockFreq(BB) >= BFI->getBlockFreq(TailCallBB));
2246     BFI->setBlockFreq(
2247         BB,
2248         (BFI->getBlockFreq(BB) - BFI->getBlockFreq(TailCallBB)).getFrequency());
2249     ModifiedDT = Changed = true;
2250     ++NumRetsDup;
2251   }
2252 
2253   // If we eliminated all predecessors of the block, delete the block now.
2254   if (Changed && !BB->hasAddressTaken() && pred_begin(BB) == pred_end(BB))
2255     BB->eraseFromParent();
2256 
2257   return Changed;
2258 }
2259 
2260 //===----------------------------------------------------------------------===//
2261 // Memory Optimization
2262 //===----------------------------------------------------------------------===//
2263 
2264 namespace {
2265 
2266 /// This is an extended version of TargetLowering::AddrMode
2267 /// which holds actual Value*'s for register values.
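/// The address being described is conceptually
///   BaseGV + BaseReg + BaseOffs + ScaledReg*Scale
/// where any subset of the terms may be absent.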
2268 struct ExtAddrMode : public TargetLowering::AddrMode {
2269   Value *BaseReg = nullptr;
2270   Value *ScaledReg = nullptr;
2271   Value *OriginalValue = nullptr;
2272   bool InBounds = true;
2273 
2274   enum FieldName {
2275     NoField        = 0x00,
2276     BaseRegField   = 0x01,
2277     BaseGVField    = 0x02,
2278     BaseOffsField  = 0x04,
2279     ScaledRegField = 0x08,
2280     ScaleField     = 0x10,
2281     MultipleFields = 0xff
2282   };
2283 
2284 
2285   ExtAddrMode() = default;
2286 
2287   void print(raw_ostream &OS) const;
2288   void dump() const;
2289 
2290   FieldName compare(const ExtAddrMode &other) {
    // First check that the types are the same on each field, as differing
    // types are something we can't cope with later on.
2293     if (BaseReg && other.BaseReg &&
2294         BaseReg->getType() != other.BaseReg->getType())
2295       return MultipleFields;
2296     if (BaseGV && other.BaseGV &&
2297         BaseGV->getType() != other.BaseGV->getType())
2298       return MultipleFields;
2299     if (ScaledReg && other.ScaledReg &&
2300         ScaledReg->getType() != other.ScaledReg->getType())
2301       return MultipleFields;
2302 
2303     // Conservatively reject 'inbounds' mismatches.
2304     if (InBounds != other.InBounds)
2305       return MultipleFields;
2306 
2307     // Check each field to see if it differs.
2308     unsigned Result = NoField;
2309     if (BaseReg != other.BaseReg)
2310       Result |= BaseRegField;
2311     if (BaseGV != other.BaseGV)
2312       Result |= BaseGVField;
2313     if (BaseOffs != other.BaseOffs)
2314       Result |= BaseOffsField;
2315     if (ScaledReg != other.ScaledReg)
2316       Result |= ScaledRegField;
2317     // Don't count 0 as being a different scale, because that actually means
2318     // unscaled (which will already be counted by having no ScaledReg).
2319     if (Scale && other.Scale && Scale != other.Scale)
2320       Result |= ScaleField;
2321 
2322     if (countPopulation(Result) > 1)
2323       return MultipleFields;
2324     else
2325       return static_cast<FieldName>(Result);
2326   }
2327 
  // An AddrMode is trivial if it involves no calculation, i.e. it is just a
  // base with no offset.
2330   bool isTrivial() {
2331     // An AddrMode is (BaseGV + BaseReg + BaseOffs + ScaleReg * Scale) so it is
2332     // trivial if at most one of these terms is nonzero, except that BaseGV and
2333     // BaseReg both being zero actually means a null pointer value, which we
2334     // consider to be 'non-zero' here.
2335     return !BaseOffs && !Scale && !(BaseGV && BaseReg);
2336   }
2337 
2338   Value *GetFieldAsValue(FieldName Field, Type *IntPtrTy) {
2339     switch (Field) {
2340     default:
2341       return nullptr;
2342     case BaseRegField:
2343       return BaseReg;
2344     case BaseGVField:
2345       return BaseGV;
2346     case ScaledRegField:
2347       return ScaledReg;
2348     case BaseOffsField:
2349       return ConstantInt::get(IntPtrTy, BaseOffs);
2350     }
2351   }
2352 
2353   void SetCombinedField(FieldName Field, Value *V,
2354                         const SmallVectorImpl<ExtAddrMode> &AddrModes) {
2355     switch (Field) {
2356     default:
2357       llvm_unreachable("Unhandled fields are expected to be rejected earlier");
2358       break;
2359     case ExtAddrMode::BaseRegField:
2360       BaseReg = V;
2361       break;
2362     case ExtAddrMode::BaseGVField:
2363       // A combined BaseGV is an Instruction, not a GlobalValue, so it goes
2364       // in the BaseReg field.
2365       assert(BaseReg == nullptr);
2366       BaseReg = V;
2367       BaseGV = nullptr;
2368       break;
2369     case ExtAddrMode::ScaledRegField:
2370       ScaledReg = V;
2371       // If we have a mix of scaled and unscaled addrmodes then we want scale
2372       // to be the scale and not zero.
2373       if (!Scale)
2374         for (const ExtAddrMode &AM : AddrModes)
2375           if (AM.Scale) {
2376             Scale = AM.Scale;
2377             break;
2378           }
2379       break;
2380     case ExtAddrMode::BaseOffsField:
2381       // The offset is no longer a constant, so it goes in ScaledReg with a
2382       // scale of 1.
2383       assert(ScaledReg == nullptr);
2384       ScaledReg = V;
2385       Scale = 1;
2386       BaseOffs = 0;
2387       break;
2388     }
2389   }
2390 };
2391 
2392 } // end anonymous namespace
2393 
2394 #ifndef NDEBUG
2395 static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) {
2396   AM.print(OS);
2397   return OS;
2398 }
2399 #endif
2400 
2401 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2402 void ExtAddrMode::print(raw_ostream &OS) const {
2403   bool NeedPlus = false;
2404   OS << "[";
2405   if (InBounds)
2406     OS << "inbounds ";
2407   if (BaseGV) {
2408     OS << (NeedPlus ? " + " : "")
2409        << "GV:";
2410     BaseGV->printAsOperand(OS, /*PrintType=*/false);
2411     NeedPlus = true;
2412   }
2413 
2414   if (BaseOffs) {
2415     OS << (NeedPlus ? " + " : "")
2416        << BaseOffs;
2417     NeedPlus = true;
2418   }
2419 
2420   if (BaseReg) {
2421     OS << (NeedPlus ? " + " : "")
2422        << "Base:";
2423     BaseReg->printAsOperand(OS, /*PrintType=*/false);
2424     NeedPlus = true;
2425   }
2426   if (Scale) {
2427     OS << (NeedPlus ? " + " : "")
2428        << Scale << "*";
2429     ScaledReg->printAsOperand(OS, /*PrintType=*/false);
2430   }
2431 
2432   OS << ']';
2433 }
2434 
2435 LLVM_DUMP_METHOD void ExtAddrMode::dump() const {
2436   print(dbgs());
2437   dbgs() << '\n';
2438 }
2439 #endif
2440 
2441 namespace {
2442 
2443 /// This class provides transaction based operation on the IR.
2444 /// Every change made through this class is recorded in the internal state and
2445 /// can be undone (rollback) until commit is called.
2446 /// CGP does not check if instructions could be speculatively executed when
2447 /// moved. Preserving the original location would pessimize the debugging
2448 /// experience, as well as negatively impact the quality of sample PGO.
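/// A rough usage sketch (member names here are illustrative; see the
/// declarations further down in this class for the exact interface):
///   TypePromotionTransaction TPT(RemovedInsts);
///   auto Point = TPT.getRestorationPoint();
///   ... mutate the IR through TPT ...
///   if (Profitable)
///     TPT.commit();
///   else
///     TPT.rollback(Point);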
2449 class TypePromotionTransaction {
2450   /// This represents the common interface of the individual transaction.
2451   /// Each class implements the logic for doing one specific modification on
2452   /// the IR via the TypePromotionTransaction.
2453   class TypePromotionAction {
2454   protected:
2455     /// The Instruction modified.
2456     Instruction *Inst;
2457 
2458   public:
2459     /// Constructor of the action.
2460     /// The constructor performs the related action on the IR.
2461     TypePromotionAction(Instruction *Inst) : Inst(Inst) {}
2462 
2463     virtual ~TypePromotionAction() = default;
2464 
2465     /// Undo the modification done by this action.
2466     /// When this method is called, the IR must be in the same state as it was
2467     /// before this action was applied.
2468     /// \pre Undoing the action works if and only if the IR is in the exact same
2469     /// state as it was directly after this action was applied.
2470     virtual void undo() = 0;
2471 
2472     /// Advocate every change made by this action.
2473     /// When the results on the IR of the action are to be kept, it is important
2474     /// to call this function, otherwise hidden information may be kept forever.
2475     virtual void commit() {
2476       // Nothing to be done, this action is not doing anything.
2477     }
2478   };
2479 
2480   /// Utility to remember the position of an instruction.
2481   class InsertionHandler {
2482     /// Position of an instruction.
2483     /// Either an instruction:
2484     /// - Is the first in a basic block: BB is used.
2485     /// - Has a previous instruction: PrevInst is used.
2486     union {
2487       Instruction *PrevInst;
2488       BasicBlock *BB;
2489     } Point;
2490 
2491     /// Remember whether or not the instruction had a previous instruction.
2492     bool HasPrevInstruction;
2493 
2494   public:
2495     /// Record the position of \p Inst.
2496     InsertionHandler(Instruction *Inst) {
2497       BasicBlock::iterator It = Inst->getIterator();
2498       HasPrevInstruction = (It != (Inst->getParent()->begin()));
2499       if (HasPrevInstruction)
2500         Point.PrevInst = &*--It;
2501       else
2502         Point.BB = Inst->getParent();
2503     }
2504 
2505     /// Insert \p Inst at the recorded position.
2506     void insert(Instruction *Inst) {
2507       if (HasPrevInstruction) {
2508         if (Inst->getParent())
2509           Inst->removeFromParent();
2510         Inst->insertAfter(Point.PrevInst);
2511       } else {
2512         Instruction *Position = &*Point.BB->getFirstInsertionPt();
2513         if (Inst->getParent())
2514           Inst->moveBefore(Position);
2515         else
2516           Inst->insertBefore(Position);
2517       }
2518     }
2519   };
2520 
2521   /// Move an instruction before another.
2522   class InstructionMoveBefore : public TypePromotionAction {
2523     /// Original position of the instruction.
2524     InsertionHandler Position;
2525 
2526   public:
2527     /// Move \p Inst before \p Before.
2528     InstructionMoveBefore(Instruction *Inst, Instruction *Before)
2529         : TypePromotionAction(Inst), Position(Inst) {
2530       LLVM_DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before
2531                         << "\n");
2532       Inst->moveBefore(Before);
2533     }
2534 
2535     /// Move the instruction back to its original position.
2536     void undo() override {
2537       LLVM_DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n");
2538       Position.insert(Inst);
2539     }
2540   };
2541 
2542   /// Set the operand of an instruction with a new value.
2543   class OperandSetter : public TypePromotionAction {
2544     /// Original operand of the instruction.
2545     Value *Origin;
2546 
2547     /// Index of the modified instruction.
2548     unsigned Idx;
2549 
2550   public:
2551     /// Set \p Idx operand of \p Inst with \p NewVal.
2552     OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal)
2553         : TypePromotionAction(Inst), Idx(Idx) {
2554       LLVM_DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n"
2555                         << "for:" << *Inst << "\n"
2556                         << "with:" << *NewVal << "\n");
2557       Origin = Inst->getOperand(Idx);
2558       Inst->setOperand(Idx, NewVal);
2559     }
2560 
2561     /// Restore the original value of the instruction.
2562     void undo() override {
2563       LLVM_DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n"
2564                         << "for: " << *Inst << "\n"
2565                         << "with: " << *Origin << "\n");
2566       Inst->setOperand(Idx, Origin);
2567     }
2568   };
2569 
2570   /// Hide the operands of an instruction.
  /// Behave as if this instruction were not using any of its operands.
2572   class OperandsHider : public TypePromotionAction {
2573     /// The list of original operands.
2574     SmallVector<Value *, 4> OriginalValues;
2575 
2576   public:
2577     /// Remove \p Inst from the uses of the operands of \p Inst.
2578     OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) {
2579       LLVM_DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n");
2580       unsigned NumOpnds = Inst->getNumOperands();
2581       OriginalValues.reserve(NumOpnds);
2582       for (unsigned It = 0; It < NumOpnds; ++It) {
2583         // Save the current operand.
2584         Value *Val = Inst->getOperand(It);
2585         OriginalValues.push_back(Val);
2586         // Set a dummy one.
2587         // We could use OperandSetter here, but that would imply an overhead
2588         // that we are not willing to pay.
2589         Inst->setOperand(It, UndefValue::get(Val->getType()));
2590       }
2591     }
2592 
2593     /// Restore the original list of uses.
2594     void undo() override {
2595       LLVM_DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n");
2596       for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It)
2597         Inst->setOperand(It, OriginalValues[It]);
2598     }
2599   };
2600 
2601   /// Build a truncate instruction.
2602   class TruncBuilder : public TypePromotionAction {
2603     Value *Val;
2604 
2605   public:
2606     /// Build a truncate instruction of \p Opnd producing a \p Ty
2607     /// result.
2608     /// trunc Opnd to Ty.
2609     TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) {
2610       IRBuilder<> Builder(Opnd);
2611       Builder.SetCurrentDebugLocation(DebugLoc());
2612       Val = Builder.CreateTrunc(Opnd, Ty, "promoted");
2613       LLVM_DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n");
2614     }
2615 
2616     /// Get the built value.
2617     Value *getBuiltValue() { return Val; }
2618 
2619     /// Remove the built instruction.
2620     void undo() override {
2621       LLVM_DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n");
2622       if (Instruction *IVal = dyn_cast<Instruction>(Val))
2623         IVal->eraseFromParent();
2624     }
2625   };
2626 
2627   /// Build a sign extension instruction.
2628   class SExtBuilder : public TypePromotionAction {
2629     Value *Val;
2630 
2631   public:
2632     /// Build a sign extension instruction of \p Opnd producing a \p Ty
2633     /// result.
2634     /// sext Opnd to Ty.
2635     SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
2636         : TypePromotionAction(InsertPt) {
2637       IRBuilder<> Builder(InsertPt);
2638       Val = Builder.CreateSExt(Opnd, Ty, "promoted");
2639       LLVM_DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n");
2640     }
2641 
2642     /// Get the built value.
2643     Value *getBuiltValue() { return Val; }
2644 
2645     /// Remove the built instruction.
2646     void undo() override {
2647       LLVM_DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n");
2648       if (Instruction *IVal = dyn_cast<Instruction>(Val))
2649         IVal->eraseFromParent();
2650     }
2651   };
2652 
2653   /// Build a zero extension instruction.
2654   class ZExtBuilder : public TypePromotionAction {
2655     Value *Val;
2656 
2657   public:
2658     /// Build a zero extension instruction of \p Opnd producing a \p Ty
2659     /// result.
2660     /// zext Opnd to Ty.
2661     ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
2662         : TypePromotionAction(InsertPt) {
2663       IRBuilder<> Builder(InsertPt);
2664       Builder.SetCurrentDebugLocation(DebugLoc());
2665       Val = Builder.CreateZExt(Opnd, Ty, "promoted");
2666       LLVM_DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n");
2667     }
2668 
2669     /// Get the built value.
2670     Value *getBuiltValue() { return Val; }
2671 
2672     /// Remove the built instruction.
2673     void undo() override {
2674       LLVM_DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n");
2675       if (Instruction *IVal = dyn_cast<Instruction>(Val))
2676         IVal->eraseFromParent();
2677     }
2678   };
2679 
2680   /// Mutate an instruction to another type.
2681   class TypeMutator : public TypePromotionAction {
2682     /// Record the original type.
2683     Type *OrigTy;
2684 
2685   public:
2686     /// Mutate the type of \p Inst into \p NewTy.
2687     TypeMutator(Instruction *Inst, Type *NewTy)
2688         : TypePromotionAction(Inst), OrigTy(Inst->getType()) {
2689       LLVM_DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy
2690                         << "\n");
2691       Inst->mutateType(NewTy);
2692     }
2693 
2694     /// Mutate the instruction back to its original type.
2695     void undo() override {
2696       LLVM_DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy
2697                         << "\n");
2698       Inst->mutateType(OrigTy);
2699     }
2700   };
2701 
2702   /// Replace the uses of an instruction by another instruction.
2703   class UsesReplacer : public TypePromotionAction {
2704     /// Helper structure to keep track of the replaced uses.
2705     struct InstructionAndIdx {
      /// The instruction that uses the replaced value.
      Instruction *Inst;

      /// The operand index at which the replaced value appears in Inst.
2710       unsigned Idx;
2711 
2712       InstructionAndIdx(Instruction *Inst, unsigned Idx)
2713           : Inst(Inst), Idx(Idx) {}
2714     };
2715 
2716     /// Keep track of the original uses (pair Instruction, Index).
2717     SmallVector<InstructionAndIdx, 4> OriginalUses;
2718     /// Keep track of the debug users.
2719     SmallVector<DbgValueInst *, 1> DbgValues;
2720 
2721     using use_iterator = SmallVectorImpl<InstructionAndIdx>::iterator;
2722 
2723   public:
2724     /// Replace all the use of \p Inst by \p New.
2725     UsesReplacer(Instruction *Inst, Value *New) : TypePromotionAction(Inst) {
2726       LLVM_DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New
2727                         << "\n");
2728       // Record the original uses.
2729       for (Use &U : Inst->uses()) {
2730         Instruction *UserI = cast<Instruction>(U.getUser());
2731         OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo()));
2732       }
2733       // Record the debug uses separately. They are not in the instruction's
2734       // use list, but they are replaced by RAUW.
2735       findDbgValues(DbgValues, Inst);
2736 
2737       // Now, we can replace the uses.
2738       Inst->replaceAllUsesWith(New);
2739     }
2740 
    /// Reassign the original uses of Inst back to Inst.
2742     void undo() override {
2743       LLVM_DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n");
2744       for (use_iterator UseIt = OriginalUses.begin(),
2745                         EndIt = OriginalUses.end();
2746            UseIt != EndIt; ++UseIt) {
2747         UseIt->Inst->setOperand(UseIt->Idx, Inst);
2748       }
2749       // RAUW has replaced all original uses with references to the new value,
2750       // including the debug uses. Since we are undoing the replacements,
2751       // the original debug uses must also be reinstated to maintain the
2752       // correctness and utility of debug value instructions.
2753       for (auto *DVI: DbgValues) {
2754         LLVMContext &Ctx = Inst->getType()->getContext();
2755         auto *MV = MetadataAsValue::get(Ctx, ValueAsMetadata::get(Inst));
2756         DVI->setOperand(0, MV);
2757       }
2758     }
2759   };
2760 
2761   /// Remove an instruction from the IR.
2762   class InstructionRemover : public TypePromotionAction {
2763     /// Original position of the instruction.
2764     InsertionHandler Inserter;
2765 
    /// Helper structure to hide all the links to the instruction. In other
    /// words, this helps to act as if the instruction were removed.
2768     OperandsHider Hider;
2769 
2770     /// Keep track of the uses replaced, if any.
2771     UsesReplacer *Replacer = nullptr;
2772 
2773     /// Keep track of instructions removed.
2774     SetOfInstrs &RemovedInsts;
2775 
2776   public:
    /// Remove all references to \p Inst and optionally replace all its
    /// uses with \p New.
2779     /// \p RemovedInsts Keep track of the instructions removed by this Action.
2780     /// \pre If !Inst->use_empty(), then New != nullptr
2781     InstructionRemover(Instruction *Inst, SetOfInstrs &RemovedInsts,
2782                        Value *New = nullptr)
2783         : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst),
2784           RemovedInsts(RemovedInsts) {
2785       if (New)
2786         Replacer = new UsesReplacer(Inst, New);
2787       LLVM_DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n");
2788       RemovedInsts.insert(Inst);
2789       /// The instructions removed here will be freed after completing
2790       /// optimizeBlock() for all blocks as we need to keep track of the
2791       /// removed instructions during promotion.
2792       Inst->removeFromParent();
2793     }
2794 
2795     ~InstructionRemover() override { delete Replacer; }
2796 
    /// Resurrect the instruction and reassign it to the proper uses if a
    /// new value was provided when building this action.
2799     void undo() override {
2800       LLVM_DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n");
2801       Inserter.insert(Inst);
2802       if (Replacer)
2803         Replacer->undo();
2804       Hider.undo();
2805       RemovedInsts.erase(Inst);
2806     }
2807   };
2808 
2809 public:
2810   /// Restoration point.
2811   /// The restoration point is a pointer to an action instead of an iterator
2812   /// because the iterator may be invalidated but not the pointer.
2813   using ConstRestorationPt = const TypePromotionAction *;
2814 
2815   TypePromotionTransaction(SetOfInstrs &RemovedInsts)
2816       : RemovedInsts(RemovedInsts) {}
2817 
  /// Commit every change made in this transaction.
2819   void commit();
2820 
2821   /// Undo all the changes made after the given point.
2822   void rollback(ConstRestorationPt Point);
2823 
2824   /// Get the current restoration point.
2825   ConstRestorationPt getRestorationPoint() const;
2826 
2827   /// \name API for IR modification with state keeping to support rollback.
2828   /// @{
2829   /// Same as Instruction::setOperand.
2830   void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal);
2831 
2832   /// Same as Instruction::eraseFromParent.
2833   void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr);
2834 
2835   /// Same as Value::replaceAllUsesWith.
2836   void replaceAllUsesWith(Instruction *Inst, Value *New);
2837 
2838   /// Same as Value::mutateType.
2839   void mutateType(Instruction *Inst, Type *NewTy);
2840 
  /// Same as IRBuilder::CreateTrunc.
2842   Value *createTrunc(Instruction *Opnd, Type *Ty);
2843 
  /// Same as IRBuilder::CreateSExt.
2845   Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty);
2846 
  /// Same as IRBuilder::CreateZExt.
2848   Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty);
2849 
2850   /// Same as Instruction::moveBefore.
2851   void moveBefore(Instruction *Inst, Instruction *Before);
2852   /// @}
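  //
  // Illustrative usage sketch of this API (the names below are hypothetical,
  // not from this file):
  //   ConstRestorationPt Pt = TPT.getRestorationPoint();
  //   TPT.mutateType(I, NewTy);
  //   TPT.replaceAllUsesWith(I, PromotedVal);
  //   if (NotProfitable)      // hypothetical condition
  //     TPT.rollback(Pt);     // undoes both actions recorded above
  //   else
  //     TPT.commit();         // keeps the changes and drops the undo info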
2853 
2854 private:
2855   /// The ordered list of actions made so far.
2856   SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions;
2857 
  using CommitPt =
      SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator;
2859 
2860   SetOfInstrs &RemovedInsts;
2861 };
2862 
2863 } // end anonymous namespace
2864 
2865 void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx,
2866                                           Value *NewVal) {
2867   Actions.push_back(std::make_unique<TypePromotionTransaction::OperandSetter>(
2868       Inst, Idx, NewVal));
2869 }
2870 
2871 void TypePromotionTransaction::eraseInstruction(Instruction *Inst,
2872                                                 Value *NewVal) {
2873   Actions.push_back(
2874       std::make_unique<TypePromotionTransaction::InstructionRemover>(
2875           Inst, RemovedInsts, NewVal));
2876 }
2877 
2878 void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst,
2879                                                   Value *New) {
2880   Actions.push_back(
2881       std::make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New));
2882 }
2883 
2884 void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) {
2885   Actions.push_back(
2886       std::make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy));
2887 }
2888 
2889 Value *TypePromotionTransaction::createTrunc(Instruction *Opnd,
2890                                              Type *Ty) {
2891   std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty));
2892   Value *Val = Ptr->getBuiltValue();
2893   Actions.push_back(std::move(Ptr));
2894   return Val;
2895 }
2896 
2897 Value *TypePromotionTransaction::createSExt(Instruction *Inst,
2898                                             Value *Opnd, Type *Ty) {
2899   std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty));
2900   Value *Val = Ptr->getBuiltValue();
2901   Actions.push_back(std::move(Ptr));
2902   return Val;
2903 }
2904 
2905 Value *TypePromotionTransaction::createZExt(Instruction *Inst,
2906                                             Value *Opnd, Type *Ty) {
2907   std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty));
2908   Value *Val = Ptr->getBuiltValue();
2909   Actions.push_back(std::move(Ptr));
2910   return Val;
2911 }
2912 
2913 void TypePromotionTransaction::moveBefore(Instruction *Inst,
2914                                           Instruction *Before) {
2915   Actions.push_back(
2916       std::make_unique<TypePromotionTransaction::InstructionMoveBefore>(
2917           Inst, Before));
2918 }
2919 
2920 TypePromotionTransaction::ConstRestorationPt
2921 TypePromotionTransaction::getRestorationPoint() const {
2922   return !Actions.empty() ? Actions.back().get() : nullptr;
2923 }
2924 
2925 void TypePromotionTransaction::commit() {
2926   for (CommitPt It = Actions.begin(), EndIt = Actions.end(); It != EndIt;
2927        ++It)
2928     (*It)->commit();
2929   Actions.clear();
2930 }
2931 
2932 void TypePromotionTransaction::rollback(
2933     TypePromotionTransaction::ConstRestorationPt Point) {
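  // Undo the recorded actions in reverse (LIFO) order until we reach the
  // restoration point; the action at the restoration point itself is kept.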
2934   while (!Actions.empty() && Point != Actions.back().get()) {
2935     std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val();
2936     Curr->undo();
2937   }
2938 }
2939 
2940 namespace {
2941 
2942 /// A helper class for matching addressing modes.
2943 ///
2944 /// This encapsulates the logic for matching the target-legal addressing modes.
2945 class AddressingModeMatcher {
2946   SmallVectorImpl<Instruction*> &AddrModeInsts;
2947   const TargetLowering &TLI;
2948   const TargetRegisterInfo &TRI;
2949   const DataLayout &DL;
2950 
2951   /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and
2952   /// the memory instruction that we're computing this address for.
2953   Type *AccessTy;
2954   unsigned AddrSpace;
2955   Instruction *MemoryInst;
2956 
2957   /// This is the addressing mode that we're building up. This is
2958   /// part of the return value of this addressing mode matching stuff.
2959   ExtAddrMode &AddrMode;
2960 
2961   /// The instructions inserted by other CodeGenPrepare optimizations.
2962   const SetOfInstrs &InsertedInsts;
2963 
2964   /// A map from the instructions to their type before promotion.
2965   InstrToOrigTy &PromotedInsts;
2966 
2967   /// The ongoing transaction where every action should be registered.
2968   TypePromotionTransaction &TPT;
2969 
  // A GEP whose offset is too large to be folded into the addressing mode.
2971   std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP;
2972 
2973   /// This is set to true when we should not do profitability checks.
2974   /// When true, IsProfitableToFoldIntoAddressingMode always returns true.
2975   bool IgnoreProfitability;
2976 
2977   /// True if we are optimizing for size.
2978   bool OptSize;
2979 
2980   ProfileSummaryInfo *PSI;
2981   BlockFrequencyInfo *BFI;
2982 
2983   AddressingModeMatcher(
2984       SmallVectorImpl<Instruction *> &AMI, const TargetLowering &TLI,
2985       const TargetRegisterInfo &TRI, Type *AT, unsigned AS, Instruction *MI,
2986       ExtAddrMode &AM, const SetOfInstrs &InsertedInsts,
2987       InstrToOrigTy &PromotedInsts, TypePromotionTransaction &TPT,
2988       std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP,
2989       bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI)
2990       : AddrModeInsts(AMI), TLI(TLI), TRI(TRI),
2991         DL(MI->getModule()->getDataLayout()), AccessTy(AT), AddrSpace(AS),
2992         MemoryInst(MI), AddrMode(AM), InsertedInsts(InsertedInsts),
2993         PromotedInsts(PromotedInsts), TPT(TPT), LargeOffsetGEP(LargeOffsetGEP),
2994         OptSize(OptSize), PSI(PSI), BFI(BFI) {
2995     IgnoreProfitability = false;
2996   }
2997 
2998 public:
2999   /// Find the maximal addressing mode that a load/store of V can fold,
  /// given an access type of AccessTy.  This returns a list of involved
3001   /// instructions in AddrModeInsts.
3002   /// \p InsertedInsts The instructions inserted by other CodeGenPrepare
3003   /// optimizations.
3004   /// \p PromotedInsts maps the instructions to their type before promotion.
  /// \p TPT The ongoing transaction where every action should be registered.
3006   static ExtAddrMode
3007   Match(Value *V, Type *AccessTy, unsigned AS, Instruction *MemoryInst,
3008         SmallVectorImpl<Instruction *> &AddrModeInsts,
3009         const TargetLowering &TLI, const TargetRegisterInfo &TRI,
3010         const SetOfInstrs &InsertedInsts, InstrToOrigTy &PromotedInsts,
3011         TypePromotionTransaction &TPT,
3012         std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP,
3013         bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
3014     ExtAddrMode Result;
3015 
3016     bool Success = AddressingModeMatcher(AddrModeInsts, TLI, TRI, AccessTy, AS,
3017                                          MemoryInst, Result, InsertedInsts,
3018                                          PromotedInsts, TPT, LargeOffsetGEP,
3019                                          OptSize, PSI, BFI)
3020                        .matchAddr(V, 0);
3021     (void)Success; assert(Success && "Couldn't select *anything*?");
3022     return Result;
3023   }
3024 
3025 private:
3026   bool matchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth);
3027   bool matchAddr(Value *Addr, unsigned Depth);
3028   bool matchOperationAddr(User *AddrInst, unsigned Opcode, unsigned Depth,
3029                           bool *MovedAway = nullptr);
3030   bool isProfitableToFoldIntoAddressingMode(Instruction *I,
3031                                             ExtAddrMode &AMBefore,
3032                                             ExtAddrMode &AMAfter);
3033   bool valueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2);
3034   bool isPromotionProfitable(unsigned NewCost, unsigned OldCost,
3035                              Value *PromotedOperand) const;
3036 };
3037 
3038 class PhiNodeSet;
3039 
3040 /// An iterator for PhiNodeSet.
3041 class PhiNodeSetIterator {
3042   PhiNodeSet * const Set;
3043   size_t CurrentIndex = 0;
3044 
3045 public:
3046   /// The constructor. Start should point to either a valid element, or be equal
3047   /// to the size of the underlying SmallVector of the PhiNodeSet.
3048   PhiNodeSetIterator(PhiNodeSet * const Set, size_t Start);
3049   PHINode * operator*() const;
3050   PhiNodeSetIterator& operator++();
3051   bool operator==(const PhiNodeSetIterator &RHS) const;
3052   bool operator!=(const PhiNodeSetIterator &RHS) const;
3053 };
3054 
3055 /// Keeps a set of PHINodes.
3056 ///
3057 /// This is a minimal set implementation for a specific use case:
3058 /// It is very fast when there are very few elements, but also provides good
3059 /// performance when there are many. It is similar to SmallPtrSet, but also
3060 /// provides iteration by insertion order, which is deterministic and stable
3061 /// across runs. It is also similar to SmallSetVector, but provides removing
3062 /// elements in O(1) time. This is achieved by not actually removing the element
/// from the underlying vector, so it comes at the cost of using more memory,
/// but that is fine, since PhiNodeSets are used as short-lived objects.
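///
/// Illustrative usage (the names below are hypothetical):
///   PhiNodeSet Set;
///   Set.insert(P1);
///   Set.insert(P2);
///   Set.erase(P1);          // P1 stays in NodeList but leaves NodeMap.
///   for (PHINode *P : Set)  // Visits only P2, in insertion order.
///     visit(P);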
3065 class PhiNodeSet {
3066   friend class PhiNodeSetIterator;
3067 
3068   using MapType = SmallDenseMap<PHINode *, size_t, 32>;
  using iterator = PhiNodeSetIterator;
3070 
3071   /// Keeps the elements in the order of their insertion in the underlying
3072   /// vector. To achieve constant time removal, it never deletes any element.
3073   SmallVector<PHINode *, 32> NodeList;
3074 
3075   /// Keeps the elements in the underlying set implementation. This (and not the
3076   /// NodeList defined above) is the source of truth on whether an element
3077   /// is actually in the collection.
3078   MapType NodeMap;
3079 
3080   /// Points to the first valid (not deleted) element when the set is not empty
  /// and the value is not zero. Equals the size of the underlying vector
3082   /// when the set is empty. When the value is 0, as in the beginning, the
3083   /// first element may or may not be valid.
3084   size_t FirstValidElement = 0;
3085 
3086 public:
3087   /// Inserts a new element to the collection.
3088   /// \returns true if the element is actually added, i.e. was not in the
3089   /// collection before the operation.
3090   bool insert(PHINode *Ptr) {
3091     if (NodeMap.insert(std::make_pair(Ptr, NodeList.size())).second) {
3092       NodeList.push_back(Ptr);
3093       return true;
3094     }
3095     return false;
3096   }
3097 
3098   /// Removes the element from the collection.
3099   /// \returns whether the element is actually removed, i.e. was in the
3100   /// collection before the operation.
3101   bool erase(PHINode *Ptr) {
3102     auto it = NodeMap.find(Ptr);
3103     if (it != NodeMap.end()) {
3104       NodeMap.erase(Ptr);
3105       SkipRemovedElements(FirstValidElement);
3106       return true;
3107     }
3108     return false;
3109   }
3110 
3111   /// Removes all elements and clears the collection.
3112   void clear() {
3113     NodeMap.clear();
3114     NodeList.clear();
3115     FirstValidElement = 0;
3116   }
3117 
3118   /// \returns an iterator that will iterate the elements in the order of
3119   /// insertion.
3120   iterator begin() {
3121     if (FirstValidElement == 0)
3122       SkipRemovedElements(FirstValidElement);
3123     return PhiNodeSetIterator(this, FirstValidElement);
3124   }
3125 
3126   /// \returns an iterator that points to the end of the collection.
3127   iterator end() { return PhiNodeSetIterator(this, NodeList.size()); }
3128 
3129   /// Returns the number of elements in the collection.
3130   size_t size() const {
3131     return NodeMap.size();
3132   }
3133 
  /// \returns 1 if the given element is in the collection, and 0 otherwise.
3135   size_t count(PHINode *Ptr) const {
3136     return NodeMap.count(Ptr);
3137   }
3138 
3139 private:
3140   /// Updates the CurrentIndex so that it will point to a valid element.
3141   ///
3142   /// If the element of NodeList at CurrentIndex is valid, it does not
3143   /// change it. If there are no more valid elements, it updates CurrentIndex
3144   /// to point to the end of the NodeList.
3145   void SkipRemovedElements(size_t &CurrentIndex) {
3146     while (CurrentIndex < NodeList.size()) {
3147       auto it = NodeMap.find(NodeList[CurrentIndex]);
3148       // If the element has been deleted and added again later, NodeMap will
3149       // point to a different index, so CurrentIndex will still be invalid.
3150       if (it != NodeMap.end() && it->second == CurrentIndex)
3151         break;
3152       ++CurrentIndex;
3153     }
3154   }
3155 };
3156 
3157 PhiNodeSetIterator::PhiNodeSetIterator(PhiNodeSet *const Set, size_t Start)
3158     : Set(Set), CurrentIndex(Start) {}
3159 
3160 PHINode * PhiNodeSetIterator::operator*() const {
3161   assert(CurrentIndex < Set->NodeList.size() &&
3162          "PhiNodeSet access out of range");
3163   return Set->NodeList[CurrentIndex];
3164 }
3165 
3166 PhiNodeSetIterator& PhiNodeSetIterator::operator++() {
3167   assert(CurrentIndex < Set->NodeList.size() &&
3168          "PhiNodeSet access out of range");
3169   ++CurrentIndex;
3170   Set->SkipRemovedElements(CurrentIndex);
3171   return *this;
3172 }
3173 
3174 bool PhiNodeSetIterator::operator==(const PhiNodeSetIterator &RHS) const {
3175   return CurrentIndex == RHS.CurrentIndex;
3176 }
3177 
3178 bool PhiNodeSetIterator::operator!=(const PhiNodeSetIterator &RHS) const {
3179   return !((*this) == RHS);
3180 }
3181 
/// Keeps track of the simplification of Phi nodes.
/// Accepts the set of all phi nodes and erases a phi node from this set
/// if it is simplified.
3185 class SimplificationTracker {
3186   DenseMap<Value *, Value *> Storage;
3187   const SimplifyQuery &SQ;
3188   // Tracks newly created Phi nodes. The elements are iterated by insertion
3189   // order.
3190   PhiNodeSet AllPhiNodes;
3191   // Tracks newly created Select nodes.
3192   SmallPtrSet<SelectInst *, 32> AllSelectNodes;
3193 
3194 public:
3195   SimplificationTracker(const SimplifyQuery &sq)
3196       : SQ(sq) {}
3197 
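  /// Follow the chain of replacements recorded by Put and return the value
  /// \p V ultimately maps to (or \p V itself if it was never replaced).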
3198   Value *Get(Value *V) {
3199     do {
3200       auto SV = Storage.find(V);
3201       if (SV == Storage.end())
3202         return V;
3203       V = SV->second;
3204     } while (true);
3205   }
3206 
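  /// Try to simplify \p Val and, transitively, the users of every
  /// instruction that gets simplified along the way. Simplified instructions
  /// are replaced, recorded via Put, removed from the tracked Phi/Select sets
  /// and erased. \returns the value \p Val ultimately maps to.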
3207   Value *Simplify(Value *Val) {
3208     SmallVector<Value *, 32> WorkList;
3209     SmallPtrSet<Value *, 32> Visited;
3210     WorkList.push_back(Val);
3211     while (!WorkList.empty()) {
3212       auto *P = WorkList.pop_back_val();
3213       if (!Visited.insert(P).second)
3214         continue;
3215       if (auto *PI = dyn_cast<Instruction>(P))
3216         if (Value *V = SimplifyInstruction(cast<Instruction>(PI), SQ)) {
3217           for (auto *U : PI->users())
3218             WorkList.push_back(cast<Value>(U));
3219           Put(PI, V);
3220           PI->replaceAllUsesWith(V);
3221           if (auto *PHI = dyn_cast<PHINode>(PI))
3222             AllPhiNodes.erase(PHI);
3223           if (auto *Select = dyn_cast<SelectInst>(PI))
3224             AllSelectNodes.erase(Select);
3225           PI->eraseFromParent();
3226         }
3227     }
3228     return Get(Val);
3229   }
3230 
3231   void Put(Value *From, Value *To) {
3232     Storage.insert({ From, To });
3233   }
3234 
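  /// Replace the tracked Phi node \p From with \p To, resolving any
  /// replacement that has already been recorded for \p From, and erase
  /// \p From.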
3235   void ReplacePhi(PHINode *From, PHINode *To) {
3236     Value* OldReplacement = Get(From);
3237     while (OldReplacement != From) {
3238       From = To;
3239       To = dyn_cast<PHINode>(OldReplacement);
3240       OldReplacement = Get(From);
3241     }
3242     assert(To && Get(To) == To && "Replacement PHI node is already replaced.");
3243     Put(From, To);
3244     From->replaceAllUsesWith(To);
3245     AllPhiNodes.erase(From);
3246     From->eraseFromParent();
3247   }
3248 
3249   PhiNodeSet& newPhiNodes() { return AllPhiNodes; }
3250 
3251   void insertNewPhi(PHINode *PN) { AllPhiNodes.insert(PN); }
3252 
3253   void insertNewSelect(SelectInst *SI) { AllSelectNodes.insert(SI); }
3254 
3255   unsigned countNewPhiNodes() const { return AllPhiNodes.size(); }
3256 
3257   unsigned countNewSelectNodes() const { return AllSelectNodes.size(); }
3258 
3259   void destroyNewNodes(Type *CommonType) {
3260     // For safe erasing, replace the uses with dummy value first.
3261     auto *Dummy = UndefValue::get(CommonType);
3262     for (auto *I : AllPhiNodes) {
3263       I->replaceAllUsesWith(Dummy);
3264       I->eraseFromParent();
3265     }
3266     AllPhiNodes.clear();
3267     for (auto *I : AllSelectNodes) {
3268       I->replaceAllUsesWith(Dummy);
3269       I->eraseFromParent();
3270     }
3271     AllSelectNodes.clear();
3272   }
3273 };
3274 
3275 /// A helper class for combining addressing modes.
3276 class AddressingModeCombiner {
3277   typedef DenseMap<Value *, Value *> FoldAddrToValueMapping;
3278   typedef std::pair<PHINode *, PHINode *> PHIPair;
3279 
3280 private:
3281   /// The addressing modes we've collected.
3282   SmallVector<ExtAddrMode, 16> AddrModes;
3283 
3284   /// The field in which the AddrModes differ, when we have more than one.
3285   ExtAddrMode::FieldName DifferentField = ExtAddrMode::NoField;
3286 
3287   /// Are the AddrModes that we have all just equal to their original values?
3288   bool AllAddrModesTrivial = true;
3289 
3290   /// Common Type for all different fields in addressing modes.
3291   Type *CommonType;
3292 
3293   /// SimplifyQuery for simplifyInstruction utility.
3294   const SimplifyQuery &SQ;
3295 
3296   /// Original Address.
3297   Value *Original;
3298 
3299 public:
3300   AddressingModeCombiner(const SimplifyQuery &_SQ, Value *OriginalValue)
3301       : CommonType(nullptr), SQ(_SQ), Original(OriginalValue) {}
3302 
3303   /// Get the combined AddrMode
3304   const ExtAddrMode &getAddrMode() const {
3305     return AddrModes[0];
3306   }
3307 
3308   /// Add a new AddrMode if it's compatible with the AddrModes we already
3309   /// have.
3310   /// \return True iff we succeeded in doing so.
3311   bool addNewAddrMode(ExtAddrMode &NewAddrMode) {
    // Take note of whether we have any non-trivial AddrModes, as we need to
    // detect when all AddrModes are trivial; in that case we would introduce
    // a phi or select which just duplicates what's already there.
3315     AllAddrModesTrivial = AllAddrModesTrivial && NewAddrMode.isTrivial();
3316 
3317     // If this is the first addrmode then everything is fine.
3318     if (AddrModes.empty()) {
3319       AddrModes.emplace_back(NewAddrMode);
3320       return true;
3321     }
3322 
3323     // Figure out how different this is from the other address modes, which we
3324     // can do just by comparing against the first one given that we only care
3325     // about the cumulative difference.
3326     ExtAddrMode::FieldName ThisDifferentField =
3327       AddrModes[0].compare(NewAddrMode);
3328     if (DifferentField == ExtAddrMode::NoField)
3329       DifferentField = ThisDifferentField;
3330     else if (DifferentField != ThisDifferentField)
3331       DifferentField = ExtAddrMode::MultipleFields;
3332 
3333     // If NewAddrMode differs in more than one dimension we cannot handle it.
3334     bool CanHandle = DifferentField != ExtAddrMode::MultipleFields;
3335 
3336     // If Scale Field is different then we reject.
3337     CanHandle = CanHandle && DifferentField != ExtAddrMode::ScaleField;
3338 
    // We must also reject the case where the base offsets differ and the
    // scaled register is not null: we cannot handle it, because the merge of
    // the different offsets would have to be used as the ScaleReg.
3342     CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseOffsField ||
3343                               !NewAddrMode.ScaledReg);
3344 
    // We must also reject the case where the GVs differ and a base register
    // is installed, because we want to use the base register as the merge of
    // the GV values.
3347     CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseGVField ||
3348                               !NewAddrMode.HasBaseReg);
3349 
    // Even if NewAddrMode is the same, we still need to collect it because
    // the original value is different, and later we will need all the
    // original values as anchors when finding the common Phi node.
3353     if (CanHandle)
3354       AddrModes.emplace_back(NewAddrMode);
3355     else
3356       AddrModes.clear();
3357 
3358     return CanHandle;
3359   }
3360 
3361   /// Combine the addressing modes we've collected into a single
3362   /// addressing mode.
3363   /// \return True iff we successfully combined them or we only had one so
3364   /// didn't need to combine them anyway.
3365   bool combineAddrModes() {
3366     // If we have no AddrModes then they can't be combined.
3367     if (AddrModes.size() == 0)
3368       return false;
3369 
3370     // A single AddrMode can trivially be combined.
3371     if (AddrModes.size() == 1 || DifferentField == ExtAddrMode::NoField)
3372       return true;
3373 
3374     // If the AddrModes we collected are all just equal to the value they are
3375     // derived from then combining them wouldn't do anything useful.
3376     if (AllAddrModesTrivial)
3377       return false;
3378 
3379     if (!addrModeCombiningAllowed())
3380       return false;
3381 
3382     // Build a map between <original value, basic block where we saw it> to
3383     // value of base register.
3384     // Bail out if there is no common type.
3385     FoldAddrToValueMapping Map;
3386     if (!initializeMap(Map))
3387       return false;
3388 
3389     Value *CommonValue = findCommon(Map);
3390     if (CommonValue)
3391       AddrModes[0].SetCombinedField(DifferentField, CommonValue, AddrModes);
3392     return CommonValue != nullptr;
3393   }
3394 
3395 private:
  /// Initialize Map with anchor values. For each address seen, we record the
  /// value of the differing field in that address.
  /// At the same time we find a common type for the differing fields, which we
  /// will use to create new Phi/Select nodes; it is kept in the CommonType
  /// field.
  /// Return false if no common type is found.
3401   bool initializeMap(FoldAddrToValueMapping &Map) {
    // Keep track of keys where the value is null. We will need to replace them
    // with a constant null once we know the common type.
3404     SmallVector<Value *, 2> NullValue;
3405     Type *IntPtrTy = SQ.DL.getIntPtrType(AddrModes[0].OriginalValue->getType());
3406     for (auto &AM : AddrModes) {
3407       Value *DV = AM.GetFieldAsValue(DifferentField, IntPtrTy);
3408       if (DV) {
3409         auto *Type = DV->getType();
3410         if (CommonType && CommonType != Type)
3411           return false;
3412         CommonType = Type;
3413         Map[AM.OriginalValue] = DV;
3414       } else {
3415         NullValue.push_back(AM.OriginalValue);
3416       }
3417     }
3418     assert(CommonType && "At least one non-null value must be!");
3419     for (auto *V : NullValue)
3420       Map[V] = Constant::getNullValue(CommonType);
3421     return true;
3422   }
3423 
  /// We have a mapping from value A to value B, where B was a field in the
  /// addressing mode represented by A. We also have an original value C
  /// representing the address we start with. Traversing from C through phis
  /// and selects, we ended up with the A's in the map. This utility function
  /// tries to find a value V which is a field in addressing mode C such that,
  /// by traversing through phi nodes and selects, we end up at the
  /// corresponding values B in the map.
  /// The utility will create new Phis/Selects if needed.
3431   // The simple example looks as follows:
3432   // BB1:
3433   //   p1 = b1 + 40
3434   //   br cond BB2, BB3
3435   // BB2:
3436   //   p2 = b2 + 40
3437   //   br BB3
3438   // BB3:
3439   //   p = phi [p1, BB1], [p2, BB2]
3440   //   v = load p
3441   // Map is
3442   //   p1 -> b1
3443   //   p2 -> b2
3444   // Request is
3445   //   p -> ?
3446   // The function tries to find or build phi [b1, BB1], [b2, BB2] in BB3.
3447   Value *findCommon(FoldAddrToValueMapping &Map) {
    // Tracks the simplification of newly created phi nodes. We use this
    // mapping because we will add newly created Phi nodes to AddrToBase.
    // Simplification of Phi nodes is recursive, so some Phi node may be
    // simplified after we added it to AddrToBase. In reality, this
    // simplification is possible only if the original phis/selects were not
    // simplified yet.
    // Using this mapping we can find the current value in AddrToBase.
3455     SimplificationTracker ST(SQ);
3456 
3457     // First step, DFS to create PHI nodes for all intermediate blocks.
3458     // Also fill traverse order for the second step.
3459     SmallVector<Value *, 32> TraverseOrder;
3460     InsertPlaceholders(Map, TraverseOrder, ST);
3461 
3462     // Second Step, fill new nodes by merged values and simplify if possible.
3463     FillPlaceholders(Map, TraverseOrder, ST);
3464 
3465     if (!AddrSinkNewSelects && ST.countNewSelectNodes() > 0) {
3466       ST.destroyNewNodes(CommonType);
3467       return nullptr;
3468     }
3469 
    // Now we'd like to match the new Phi nodes to existing ones.
3471     unsigned PhiNotMatchedCount = 0;
3472     if (!MatchPhiSet(ST, AddrSinkNewPhis, PhiNotMatchedCount)) {
3473       ST.destroyNewNodes(CommonType);
3474       return nullptr;
3475     }
3476 
3477     auto *Result = ST.Get(Map.find(Original)->second);
3478     if (Result) {
3479       NumMemoryInstsPhiCreated += ST.countNewPhiNodes() + PhiNotMatchedCount;
3480       NumMemoryInstsSelectCreated += ST.countNewSelectNodes();
3481     }
3482     return Result;
3483   }
3484 
3485   /// Try to match PHI node to Candidate.
3486   /// Matcher tracks the matched Phi nodes.
3487   bool MatchPhiNode(PHINode *PHI, PHINode *Candidate,
3488                     SmallSetVector<PHIPair, 8> &Matcher,
3489                     PhiNodeSet &PhiNodesToMatch) {
3490     SmallVector<PHIPair, 8> WorkList;
3491     Matcher.insert({ PHI, Candidate });
3492     SmallSet<PHINode *, 8> MatchedPHIs;
3493     MatchedPHIs.insert(PHI);
3494     WorkList.push_back({ PHI, Candidate });
3495     SmallSet<PHIPair, 8> Visited;
3496     while (!WorkList.empty()) {
3497       auto Item = WorkList.pop_back_val();
3498       if (!Visited.insert(Item).second)
3499         continue;
      // We iterate over all incoming values of the Phi to compare them.
      // If the values are different, both of them are Phis, the first one is
      // a Phi we added (subject to match), and both are in the same basic
      // block, then we can match our pair if the values match. So we state
      // that these values match and add the pair to the worklist to verify
      // that.
3505       for (auto B : Item.first->blocks()) {
3506         Value *FirstValue = Item.first->getIncomingValueForBlock(B);
3507         Value *SecondValue = Item.second->getIncomingValueForBlock(B);
3508         if (FirstValue == SecondValue)
3509           continue;
3510 
3511         PHINode *FirstPhi = dyn_cast<PHINode>(FirstValue);
3512         PHINode *SecondPhi = dyn_cast<PHINode>(SecondValue);
3513 
        // If one of them is not a Phi, or
        // the first one is not a Phi node from the set we'd like to match, or
        // they are Phi nodes from different basic blocks, then
        // we will not be able to match.
3518         if (!FirstPhi || !SecondPhi || !PhiNodesToMatch.count(FirstPhi) ||
3519             FirstPhi->getParent() != SecondPhi->getParent())
3520           return false;
3521 
3522         // If we already matched them then continue.
3523         if (Matcher.count({ FirstPhi, SecondPhi }))
3524           continue;
        // So the values are different and do not match. So we need them to
3526         // match. (But we register no more than one match per PHI node, so that
3527         // we won't later try to replace them twice.)
3528         if (MatchedPHIs.insert(FirstPhi).second)
3529           Matcher.insert({ FirstPhi, SecondPhi });
        // But we must check it.
3531         WorkList.push_back({ FirstPhi, SecondPhi });
3532       }
3533     }
3534     return true;
3535   }
3536 
3537   /// For the given set of PHI nodes (in the SimplificationTracker) try
3538   /// to find their equivalents.
3539   /// Returns false if this matching fails and creation of new Phi is disabled.
3540   bool MatchPhiSet(SimplificationTracker &ST, bool AllowNewPhiNodes,
3541                    unsigned &PhiNotMatchedCount) {
3542     // Matched and PhiNodesToMatch iterate their elements in a deterministic
3543     // order, so the replacements (ReplacePhi) are also done in a deterministic
3544     // order.
3545     SmallSetVector<PHIPair, 8> Matched;
3546     SmallPtrSet<PHINode *, 8> WillNotMatch;
3547     PhiNodeSet &PhiNodesToMatch = ST.newPhiNodes();
3548     while (PhiNodesToMatch.size()) {
3549       PHINode *PHI = *PhiNodesToMatch.begin();
3550 
      // Add this Phi first: if it matches no Phi node in the basic block, it
      // will not match at all.
3552       WillNotMatch.clear();
3553       WillNotMatch.insert(PHI);
3554 
      // Traverse all Phis until we find an equivalent or fail to do so.
3556       bool IsMatched = false;
3557       for (auto &P : PHI->getParent()->phis()) {
3558         if (&P == PHI)
3559           continue;
3560         if ((IsMatched = MatchPhiNode(PHI, &P, Matched, PhiNodesToMatch)))
3561           break;
        // If it does not match, collect all Phi nodes from the matcher.
        // If we end up with no match, then all these Phi nodes will not match
        // later.
3565         for (auto M : Matched)
3566           WillNotMatch.insert(M.first);
3567         Matched.clear();
3568       }
3569       if (IsMatched) {
3570         // Replace all matched values and erase them.
3571         for (auto MV : Matched)
3572           ST.ReplacePhi(MV.first, MV.second);
3573         Matched.clear();
3574         continue;
3575       }
3576       // If we are not allowed to create new nodes then bail out.
3577       if (!AllowNewPhiNodes)
3578         return false;
3579       // Just remove all seen values in matcher. They will not match anything.
3580       PhiNotMatchedCount += WillNotMatch.size();
3581       for (auto *P : WillNotMatch)
3582         PhiNodesToMatch.erase(P);
3583     }
3584     return true;
3585   }
3586   /// Fill the placeholders with values from predecessors and simplify them.
3587   void FillPlaceholders(FoldAddrToValueMapping &Map,
3588                         SmallVectorImpl<Value *> &TraverseOrder,
3589                         SimplificationTracker &ST) {
3590     while (!TraverseOrder.empty()) {
3591       Value *Current = TraverseOrder.pop_back_val();
3592       assert(Map.find(Current) != Map.end() && "No node to fill!!!");
3593       Value *V = Map[Current];
3594 
3595       if (SelectInst *Select = dyn_cast<SelectInst>(V)) {
3596         // CurrentValue also must be Select.
3597         auto *CurrentSelect = cast<SelectInst>(Current);
3598         auto *TrueValue = CurrentSelect->getTrueValue();
3599         assert(Map.find(TrueValue) != Map.end() && "No True Value!");
3600         Select->setTrueValue(ST.Get(Map[TrueValue]));
3601         auto *FalseValue = CurrentSelect->getFalseValue();
3602         assert(Map.find(FalseValue) != Map.end() && "No False Value!");
3603         Select->setFalseValue(ST.Get(Map[FalseValue]));
3604       } else {
3605         // Must be a Phi node then.
3606         auto *PHI = cast<PHINode>(V);
3607         // Fill the Phi node with values from predecessors.
3608         for (auto *B : predecessors(PHI->getParent())) {
3609           Value *PV = cast<PHINode>(Current)->getIncomingValueForBlock(B);
3610           assert(Map.find(PV) != Map.end() && "No predecessor Value!");
3611           PHI->addIncoming(ST.Get(Map[PV]), B);
3612         }
3613       }
3614       Map[Current] = ST.Simplify(V);
3615     }
3616   }
3617 
  /// Starting from the original value, recursively iterates over the def-use
  /// chain up to known ending values represented in a map. For each traversed
  /// phi/select, inserts a placeholder Phi or Select.
  /// Reports all newly created Phi/Select nodes by adding them to the set.
  /// Also reports the order in which the values have been traversed.
3623   void InsertPlaceholders(FoldAddrToValueMapping &Map,
3624                           SmallVectorImpl<Value *> &TraverseOrder,
3625                           SimplificationTracker &ST) {
3626     SmallVector<Value *, 32> Worklist;
3627     assert((isa<PHINode>(Original) || isa<SelectInst>(Original)) &&
3628            "Address must be a Phi or Select node");
3629     auto *Dummy = UndefValue::get(CommonType);
3630     Worklist.push_back(Original);
3631     while (!Worklist.empty()) {
3632       Value *Current = Worklist.pop_back_val();
3633       // if it is already visited or it is an ending value then skip it.
3634       if (Map.find(Current) != Map.end())
3635         continue;
3636       TraverseOrder.push_back(Current);
3637 
3638       // CurrentValue must be a Phi node or select. All others must be covered
3639       // by anchors.
3640       if (SelectInst *CurrentSelect = dyn_cast<SelectInst>(Current)) {
3641         // Is it OK to get metadata from OrigSelect?!
3642         // Create a Select placeholder with dummy value.
3643         SelectInst *Select = SelectInst::Create(
3644             CurrentSelect->getCondition(), Dummy, Dummy,
3645             CurrentSelect->getName(), CurrentSelect, CurrentSelect);
3646         Map[Current] = Select;
3647         ST.insertNewSelect(Select);
3648         // We are interested in True and False values.
3649         Worklist.push_back(CurrentSelect->getTrueValue());
3650         Worklist.push_back(CurrentSelect->getFalseValue());
3651       } else {
3652         // It must be a Phi node then.
3653         PHINode *CurrentPhi = cast<PHINode>(Current);
3654         unsigned PredCount = CurrentPhi->getNumIncomingValues();
3655         PHINode *PHI =
3656             PHINode::Create(CommonType, PredCount, "sunk_phi", CurrentPhi);
3657         Map[Current] = PHI;
3658         ST.insertNewPhi(PHI);
3659         for (Value *P : CurrentPhi->incoming_values())
3660           Worklist.push_back(P);
3661       }
3662     }
3663   }
3664 
3665   bool addrModeCombiningAllowed() {
3666     if (DisableComplexAddrModes)
3667       return false;
3668     switch (DifferentField) {
3669     default:
3670       return false;
3671     case ExtAddrMode::BaseRegField:
3672       return AddrSinkCombineBaseReg;
3673     case ExtAddrMode::BaseGVField:
3674       return AddrSinkCombineBaseGV;
3675     case ExtAddrMode::BaseOffsField:
3676       return AddrSinkCombineBaseOffs;
3677     case ExtAddrMode::ScaledRegField:
3678       return AddrSinkCombineScaledReg;
3679     }
3680   }
3681 };
3682 } // end anonymous namespace
3683 
3684 /// Try adding ScaleReg*Scale to the current addressing mode.
3685 /// Return true and update AddrMode if this addr mode is legal for the target,
3686 /// false if not.
3687 bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale,
3688                                              unsigned Depth) {
3689   // If Scale is 1, then this is the same as adding ScaleReg to the addressing
3690   // mode.  Just process that directly.
3691   if (Scale == 1)
3692     return matchAddr(ScaleReg, Depth);
3693 
3694   // If the scale is 0, it takes nothing to add this.
3695   if (Scale == 0)
3696     return true;
3697 
3698   // If we already have a scale of this value, we can add to it, otherwise, we
3699   // need an available scale field.
3700   if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg)
3701     return false;
3702 
3703   ExtAddrMode TestAddrMode = AddrMode;
3704 
3705   // Add scale to turn X*4+X*3 -> X*7.  This could also do things like
3706   // [A+B + A*7] -> [B+A*8].
3707   TestAddrMode.Scale += Scale;
3708   TestAddrMode.ScaledReg = ScaleReg;
3709 
3710   // If the new address isn't legal, bail out.
3711   if (!TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace))
3712     return false;
3713 
3714   // It was legal, so commit it.
3715   AddrMode = TestAddrMode;
3716 
3717   // Okay, we decided that we can add ScaleReg+Scale to AddrMode.  Check now
3718   // to see if ScaleReg is actually X+C.  If so, we can turn this into adding
3719   // X*Scale + C*Scale to addr mode.
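  // For example (illustrative IR, not from a test): if ScaleReg is
  //   %scalereg = add i64 %x, 7
  // and the scale is 4, we can instead use ScaledReg = %x, keep Scale = 4,
  // and fold the constant by adding 7 * 4 to BaseOffs.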
3720   ConstantInt *CI = nullptr; Value *AddLHS = nullptr;
3721   if (isa<Instruction>(ScaleReg) &&  // not a constant expr.
3722       match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI))) &&
3723       CI->getValue().isSignedIntN(64)) {
3724     TestAddrMode.InBounds = false;
3725     TestAddrMode.ScaledReg = AddLHS;
3726     TestAddrMode.BaseOffs += CI->getSExtValue() * TestAddrMode.Scale;
3727 
3728     // If this addressing mode is legal, commit it and remember that we folded
3729     // this instruction.
3730     if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) {
3731       AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
3732       AddrMode = TestAddrMode;
3733       return true;
3734     }
3735   }
3736 
3737   // Otherwise, not (x+c)*scale, just return what we have.
3738   return true;
3739 }
3740 
3741 /// This is a little filter, which returns true if an addressing computation
3742 /// involving I might be folded into a load/store accessing it.
3743 /// This doesn't need to be perfect, but needs to accept at least
3744 /// the set of instructions that MatchOperationAddr can.
3745 static bool MightBeFoldableInst(Instruction *I) {
3746   switch (I->getOpcode()) {
3747   case Instruction::BitCast:
3748   case Instruction::AddrSpaceCast:
3749     // Don't touch identity bitcasts.
3750     if (I->getType() == I->getOperand(0)->getType())
3751       return false;
3752     return I->getType()->isIntOrPtrTy();
3753   case Instruction::PtrToInt:
3754     // PtrToInt is always a noop, as we know that the int type is pointer sized.
3755     return true;
3756   case Instruction::IntToPtr:
3757     // We know the input is intptr_t, so this is foldable.
3758     return true;
3759   case Instruction::Add:
3760     return true;
3761   case Instruction::Mul:
3762   case Instruction::Shl:
3763     // Can only handle X*C and X << C.
3764     return isa<ConstantInt>(I->getOperand(1));
3765   case Instruction::GetElementPtr:
3766     return true;
3767   default:
3768     return false;
3769   }
3770 }
3771 
3772 /// Check whether or not \p Val is a legal instruction for \p TLI.
3773 /// \note \p Val is assumed to be the product of some type promotion.
3774 /// Therefore if \p Val has an undefined state in \p TLI, this is assumed
3775 /// to be legal, as the non-promoted value would have had the same state.
3776 static bool isPromotedInstructionLegal(const TargetLowering &TLI,
3777                                        const DataLayout &DL, Value *Val) {
3778   Instruction *PromotedInst = dyn_cast<Instruction>(Val);
3779   if (!PromotedInst)
3780     return false;
3781   int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode());
3782   // If the ISDOpcode is undefined, it was undefined before the promotion.
3783   if (!ISDOpcode)
3784     return true;
3785   // Otherwise, check if the promoted instruction is legal or not.
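  // For instance (illustrative): if Val is an i64 mul and the target reports
  // ISD::MUL as legal or custom for i64, the promotion is considered legal.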
3786   return TLI.isOperationLegalOrCustom(
3787       ISDOpcode, TLI.getValueType(DL, PromotedInst->getType()));
3788 }
3789 
3790 namespace {
3791 
/// Helper class to perform type promotion.
3793 class TypePromotionHelper {
3794   /// Utility function to add a promoted instruction \p ExtOpnd to
3795   /// \p PromotedInsts and record the type of extension we have seen.
3796   static void addPromotedInst(InstrToOrigTy &PromotedInsts,
3797                               Instruction *ExtOpnd,
3798                               bool IsSExt) {
3799     ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension;
3800     InstrToOrigTy::iterator It = PromotedInsts.find(ExtOpnd);
3801     if (It != PromotedInsts.end()) {
3802       // If the new extension is same as original, the information in
3803       // PromotedInsts[ExtOpnd] is still correct.
3804       if (It->second.getInt() == ExtTy)
3805         return;
3806 
3807       // Now the new extension is different from old extension, we make
3808       // the type information invalid by setting extension type to
3809       // BothExtension.
3810       ExtTy = BothExtension;
3811     }
3812     PromotedInsts[ExtOpnd] = TypeIsSExt(ExtOpnd->getType(), ExtTy);
3813   }
3814 
3815   /// Utility function to query the original type of instruction \p Opnd
3816   /// with a matched extension type. If the extension doesn't match, we
3817   /// cannot use the information we had on the original type.
3818   /// BothExtension doesn't match any extension type.
3819   static const Type *getOrigType(const InstrToOrigTy &PromotedInsts,
3820                                  Instruction *Opnd,
3821                                  bool IsSExt) {
3822     ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension;
3823     InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd);
3824     if (It != PromotedInsts.end() && It->second.getInt() == ExtTy)
3825       return It->second.getPointer();
3826     return nullptr;
3827   }
3828 
3829   /// Utility function to check whether or not a sign or zero extension
3830   /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by
3831   /// either using the operands of \p Inst or promoting \p Inst.
3832   /// The type of the extension is defined by \p IsSExt.
3833   /// In other words, check if:
3834   /// ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType.
3835   /// #1 Promotion applies:
3836   /// ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...).
3837   /// #2 Operand reuses:
3838   /// ext opnd1 to ConsideredExtType.
3839   /// \p PromotedInsts maps the instructions to their type before promotion.
3840   static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType,
3841                             const InstrToOrigTy &PromotedInsts, bool IsSExt);
3842 
3843   /// Utility function to determine if \p OpIdx should be promoted when
3844   /// promoting \p Inst.
3845   static bool shouldExtOperand(const Instruction *Inst, int OpIdx) {
3846     return !(isa<SelectInst>(Inst) && OpIdx == 0);
3847   }
3848 
3849   /// Utility function to promote the operand of \p Ext when this
3850   /// operand is a promotable trunc or sext or zext.
3851   /// \p PromotedInsts maps the instructions to their type before promotion.
3852   /// \p CreatedInstsCost[out] contains the cost of all instructions
3853   /// created to promote the operand of Ext.
3854   /// Newly added extensions are inserted in \p Exts.
3855   /// Newly added truncates are inserted in \p Truncs.
3856   /// Should never be called directly.
3857   /// \return The promoted value which is used instead of Ext.
3858   static Value *promoteOperandForTruncAndAnyExt(
3859       Instruction *Ext, TypePromotionTransaction &TPT,
3860       InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
3861       SmallVectorImpl<Instruction *> *Exts,
3862       SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI);
3863 
3864   /// Utility function to promote the operand of \p Ext when this
3865   /// operand is promotable and is not a supported trunc or sext.
3866   /// \p PromotedInsts maps the instructions to their type before promotion.
3867   /// \p CreatedInstsCost[out] contains the cost of all the instructions
3868   /// created to promote the operand of Ext.
3869   /// Newly added extensions are inserted in \p Exts.
3870   /// Newly added truncates are inserted in \p Truncs.
3871   /// Should never be called directly.
3872   /// \return The promoted value which is used instead of Ext.
3873   static Value *promoteOperandForOther(Instruction *Ext,
3874                                        TypePromotionTransaction &TPT,
3875                                        InstrToOrigTy &PromotedInsts,
3876                                        unsigned &CreatedInstsCost,
3877                                        SmallVectorImpl<Instruction *> *Exts,
3878                                        SmallVectorImpl<Instruction *> *Truncs,
3879                                        const TargetLowering &TLI, bool IsSExt);
3880 
3881   /// \see promoteOperandForOther.
3882   static Value *signExtendOperandForOther(
3883       Instruction *Ext, TypePromotionTransaction &TPT,
3884       InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
3885       SmallVectorImpl<Instruction *> *Exts,
3886       SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
3887     return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
3888                                   Exts, Truncs, TLI, true);
3889   }
3890 
3891   /// \see promoteOperandForOther.
3892   static Value *zeroExtendOperandForOther(
3893       Instruction *Ext, TypePromotionTransaction &TPT,
3894       InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
3895       SmallVectorImpl<Instruction *> *Exts,
3896       SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
3897     return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
3898                                   Exts, Truncs, TLI, false);
3899   }
3900 
3901 public:
3902   /// Type for the utility function that promotes the operand of Ext.
3903   using Action = Value *(*)(Instruction *Ext, TypePromotionTransaction &TPT,
3904                             InstrToOrigTy &PromotedInsts,
3905                             unsigned &CreatedInstsCost,
3906                             SmallVectorImpl<Instruction *> *Exts,
3907                             SmallVectorImpl<Instruction *> *Truncs,
3908                             const TargetLowering &TLI);
3909 
3910   /// Given a sign/zero extend instruction \p Ext, return the appropriate
3911   /// action to promote the operand of \p Ext instead of using Ext.
3912   /// \return NULL if no promotable action is possible with the current
3913   /// sign extension.
3914   /// \p InsertedInsts keeps track of all the instructions inserted by the
3915   /// other CodeGenPrepare optimizations. This information is important
3916   /// because we do not want to promote these instructions as CodeGenPrepare
3917   /// will reinsert them later. Thus creating an infinite loop: create/remove.
3918   /// \p PromotedInsts maps the instructions to their type before promotion.
3919   static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts,
3920                           const TargetLowering &TLI,
3921                           const InstrToOrigTy &PromotedInsts);
3922 };
3923 
3924 } // end anonymous namespace
3925 
3926 bool TypePromotionHelper::canGetThrough(const Instruction *Inst,
3927                                         Type *ConsideredExtType,
3928                                         const InstrToOrigTy &PromotedInsts,
3929                                         bool IsSExt) {
3930   // The promotion helper does not know how to deal with vector types yet.
3931   // To be able to fix that, we would need to fix the places where we
3932   // statically extend, e.g., constants and such.
3933   if (Inst->getType()->isVectorTy())
3934     return false;
3935 
3936   // We can always get through zext.
3937   if (isa<ZExtInst>(Inst))
3938     return true;
3939 
3940   // sext(sext) is ok too.
3941   if (IsSExt && isa<SExtInst>(Inst))
3942     return true;
3943 
  // We can get through a binary operator if moving the extension through it
  // is safe; in other words, the binary operator must have the nuw flag (for
  // zext) or the nsw flag (for sext).
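  // For instance (illustrative IR): a sign extension can be moved through
  //   %a = add nsw i8 %x, %y
  // because sext(add nsw x, y) is equivalent to add nsw (sext x), (sext y).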
3946   const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst);
3947   if (isa_and_nonnull<OverflowingBinaryOperator>(BinOp) &&
3948       ((!IsSExt && BinOp->hasNoUnsignedWrap()) ||
3949        (IsSExt && BinOp->hasNoSignedWrap())))
3950     return true;
3951 
3952   // ext(and(opnd, cst)) --> and(ext(opnd), ext(cst))
3953   if ((Inst->getOpcode() == Instruction::And ||
3954        Inst->getOpcode() == Instruction::Or))
3955     return true;
3956 
3957   // ext(xor(opnd, cst)) --> xor(ext(opnd), ext(cst))
3958   if (Inst->getOpcode() == Instruction::Xor) {
3959     const ConstantInt *Cst = dyn_cast<ConstantInt>(Inst->getOperand(1));
3960     // Make sure it is not a NOT.
3961     if (Cst && !Cst->getValue().isAllOnesValue())
3962       return true;
3963   }
3964 
  // zext(lshr(opnd, cst)) --> lshr(zext(opnd), zext(cst))
3966   // It may change a poisoned value into a regular value, like
  //     zext i32 (lshr i8 %val, 12)  -->  lshr i32 (zext i8 %val), 12
3968   //          poisoned value                    regular value
  // It should be OK since undef covers a valid value.
3970   if (Inst->getOpcode() == Instruction::LShr && !IsSExt)
3971     return true;
3972 
3973   // and(ext(shl(opnd, cst)), cst) --> and(shl(ext(opnd), ext(cst)), cst)
3974   // It may change a poisoned value into a regular value, like
3975   //     zext i32 (shl i8 %val, 12)  -->  shl i32 (zext i8 %val), 12
3976   //          poisoned value                    regular value
  // It should be OK since undef covers a valid value.
3978   if (Inst->getOpcode() == Instruction::Shl && Inst->hasOneUse()) {
3979     const auto *ExtInst = cast<const Instruction>(*Inst->user_begin());
3980     if (ExtInst->hasOneUse()) {
3981       const auto *AndInst = dyn_cast<const Instruction>(*ExtInst->user_begin());
3982       if (AndInst && AndInst->getOpcode() == Instruction::And) {
3983         const auto *Cst = dyn_cast<ConstantInt>(AndInst->getOperand(1));
3984         if (Cst &&
3985             Cst->getValue().isIntN(Inst->getType()->getIntegerBitWidth()))
3986           return true;
3987       }
3988     }
3989   }
3990 
3991   // Check if we can do the following simplification.
3992   // ext(trunc(opnd)) --> ext(opnd)
3993   if (!isa<TruncInst>(Inst))
3994     return false;
3995 
3996   Value *OpndVal = Inst->getOperand(0);
3997   // Check if we can use this operand in the extension.
3998   // If the type is larger than the result type of the extension, we cannot.
3999   if (!OpndVal->getType()->isIntegerTy() ||
4000       OpndVal->getType()->getIntegerBitWidth() >
4001           ConsideredExtType->getIntegerBitWidth())
4002     return false;
4003 
4004   // If the operand of the truncate is not an instruction, we will not have
4005   // any information on the dropped bits.
  // (Actually we could for constants, but it is not worth the extra logic).
4007   Instruction *Opnd = dyn_cast<Instruction>(OpndVal);
4008   if (!Opnd)
4009     return false;
4010 
  // Check if the source of the truncate is narrow enough.
  // I.e., check that the trunc just drops extended bits of the same kind as
  // the extension.
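  // Illustrative example (not from the source): with %x = zext i8 %v to i32,
  // the pattern
  //   zext i16 (trunc i32 %x to i16) to i32
  // only drops bits that the original zero extension introduced, so the
  // extension can be rebuilt directly from the promoted value.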
4014   // #1 get the type of the operand and check the kind of the extended bits.
  const Type *OpndType = getOrigType(PromotedInsts, Opnd, IsSExt);
  if (!OpndType) {
    if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd)))
      OpndType = Opnd->getOperand(0)->getType();
    else
      return false;
  }
4022 
4023   // #2 check that the truncate just drops extended bits.
4024   return Inst->getType()->getIntegerBitWidth() >=
4025          OpndType->getIntegerBitWidth();
4026 }
4027 
4028 TypePromotionHelper::Action TypePromotionHelper::getAction(
4029     Instruction *Ext, const SetOfInstrs &InsertedInsts,
4030     const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) {
4031   assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
4032          "Unexpected instruction type");
4033   Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0));
4034   Type *ExtTy = Ext->getType();
4035   bool IsSExt = isa<SExtInst>(Ext);
  // If the operand of the extension is not an instruction, we cannot
  // get through it.
  // If it is, check whether we can get through it.
4039   if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt))
4040     return nullptr;
4041 
4042   // Do not promote if the operand has been added by codegenprepare.
4043   // Otherwise, it means we are undoing an optimization that is likely to be
4044   // redone, thus causing potential infinite loop.
4045   if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd))
4046     return nullptr;
4047 
  // SExt, ZExt or Trunc instructions.
  // Return the related handler.
4050   if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) ||
4051       isa<ZExtInst>(ExtOpnd))
4052     return promoteOperandForTruncAndAnyExt;
4053 
4054   // Regular instruction.
4055   // Abort early if we will have to insert non-free instructions.
4056   if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(ExtTy, ExtOpnd->getType()))
4057     return nullptr;
4058   return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther;
4059 }
4060 
4061 Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt(
4062     Instruction *SExt, TypePromotionTransaction &TPT,
4063     InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4064     SmallVectorImpl<Instruction *> *Exts,
4065     SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
4066   // By construction, the operand of SExt is an instruction. Otherwise we cannot
4067   // get through it and this method should not be called.
4068   Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0));
4069   Value *ExtVal = SExt;
4070   bool HasMergedNonFreeExt = false;
4071   if (isa<ZExtInst>(SExtOpnd)) {
4072     // Replace s|zext(zext(opnd))
4073     // => zext(opnd).
4074     HasMergedNonFreeExt = !TLI.isExtFree(SExtOpnd);
4075     Value *ZExt =
4076         TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType());
4077     TPT.replaceAllUsesWith(SExt, ZExt);
4078     TPT.eraseInstruction(SExt);
4079     ExtVal = ZExt;
4080   } else {
4081     // Replace z|sext(trunc(opnd)) or sext(sext(opnd))
4082     // => z|sext(opnd).
4083     TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0));
4084   }
4085   CreatedInstsCost = 0;
4086 
4087   // Remove dead code.
4088   if (SExtOpnd->use_empty())
4089     TPT.eraseInstruction(SExtOpnd);
4090 
4091   // Check if the extension is still needed.
4092   Instruction *ExtInst = dyn_cast<Instruction>(ExtVal);
4093   if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) {
4094     if (ExtInst) {
4095       if (Exts)
4096         Exts->push_back(ExtInst);
4097       CreatedInstsCost = !TLI.isExtFree(ExtInst) && !HasMergedNonFreeExt;
4098     }
4099     return ExtVal;
4100   }
4101 
4102   // At this point we have: ext ty opnd to ty.
4103   // Reassign the uses of ExtInst to the opnd and remove ExtInst.
4104   Value *NextVal = ExtInst->getOperand(0);
4105   TPT.eraseInstruction(ExtInst, NextVal);
4106   return NextVal;
4107 }
4108 
4109 Value *TypePromotionHelper::promoteOperandForOther(
4110     Instruction *Ext, TypePromotionTransaction &TPT,
4111     InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4112     SmallVectorImpl<Instruction *> *Exts,
4113     SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI,
4114     bool IsSExt) {
4115   // By construction, the operand of Ext is an instruction. Otherwise we cannot
4116   // get through it and this method should not be called.
4117   Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0));
4118   CreatedInstsCost = 0;
4119   if (!ExtOpnd->hasOneUse()) {
4120     // ExtOpnd will be promoted.
    // All its uses, except Ext, will need to use a truncated value of the
    // promoted version.
4123     // Create the truncate now.
4124     Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType());
4125     if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) {
4126       // Insert it just after the definition.
4127       ITrunc->moveAfter(ExtOpnd);
4128       if (Truncs)
4129         Truncs->push_back(ITrunc);
4130     }
4131 
4132     TPT.replaceAllUsesWith(ExtOpnd, Trunc);
4133     // Restore the operand of Ext (which has been replaced by the previous call
4134     // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext.
4135     TPT.setOperand(Ext, 0, ExtOpnd);
4136   }
4137 
4138   // Get through the Instruction:
4139   // 1. Update its type.
4140   // 2. Replace the uses of Ext by Inst.
4141   // 3. Extend each operand that needs to be extended.
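  //
  // As an illustration (hypothetical IR; %x, %y and %a are example names
  // only), promoting the operand of
  //     %a   = add i8 %x, %y
  //     %res = sext i8 %a to i32
  // conceptually becomes
  //     %xp  = sext i8 %x to i32
  //     %yp  = sext i8 %y to i32
  //     %a   = add i32 %xp, %yp      ; former users of %res now use %a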
4142 
  // Remember the original type of the instruction before promotion.
  // This is useful to know that the high bits are extended bits of the
  // matching kind (sign or zero).
4145   addPromotedInst(PromotedInsts, ExtOpnd, IsSExt);
4146   // Step #1.
4147   TPT.mutateType(ExtOpnd, Ext->getType());
4148   // Step #2.
4149   TPT.replaceAllUsesWith(Ext, ExtOpnd);
4150   // Step #3.
4151   Instruction *ExtForOpnd = Ext;
4152 
4153   LLVM_DEBUG(dbgs() << "Propagate Ext to operands\n");
4154   for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx;
4155        ++OpIdx) {
4156     LLVM_DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n');
4157     if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() ||
4158         !shouldExtOperand(ExtOpnd, OpIdx)) {
4159       LLVM_DEBUG(dbgs() << "No need to propagate\n");
4160       continue;
4161     }
4162     // Check if we can statically extend the operand.
4163     Value *Opnd = ExtOpnd->getOperand(OpIdx);
4164     if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) {
4165       LLVM_DEBUG(dbgs() << "Statically extend\n");
4166       unsigned BitWidth = Ext->getType()->getIntegerBitWidth();
4167       APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth)
4168                             : Cst->getValue().zext(BitWidth);
4169       TPT.setOperand(ExtOpnd, OpIdx, ConstantInt::get(Ext->getType(), CstVal));
4170       continue;
4171     }
    // UndefValues are typed, so we have to statically extend them.
4173     if (isa<UndefValue>(Opnd)) {
4174       LLVM_DEBUG(dbgs() << "Statically extend\n");
4175       TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType()));
4176       continue;
4177     }
4178 
    // Otherwise we have to explicitly extend the operand.
4180     // Check if Ext was reused to extend an operand.
4181     if (!ExtForOpnd) {
4182       // If yes, create a new one.
4183       LLVM_DEBUG(dbgs() << "More operands to ext\n");
4184       Value *ValForExtOpnd = IsSExt ? TPT.createSExt(Ext, Opnd, Ext->getType())
4185         : TPT.createZExt(Ext, Opnd, Ext->getType());
4186       if (!isa<Instruction>(ValForExtOpnd)) {
4187         TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd);
4188         continue;
4189       }
4190       ExtForOpnd = cast<Instruction>(ValForExtOpnd);
4191     }
4192     if (Exts)
4193       Exts->push_back(ExtForOpnd);
4194     TPT.setOperand(ExtForOpnd, 0, Opnd);
4195 
4196     // Move the sign extension before the insertion point.
4197     TPT.moveBefore(ExtForOpnd, ExtOpnd);
4198     TPT.setOperand(ExtOpnd, OpIdx, ExtForOpnd);
4199     CreatedInstsCost += !TLI.isExtFree(ExtForOpnd);
    // If more extensions are required, new instructions will have to be
    // created.
4201     ExtForOpnd = nullptr;
4202   }
4203   if (ExtForOpnd == Ext) {
4204     LLVM_DEBUG(dbgs() << "Extension is useless now\n");
4205     TPT.eraseInstruction(Ext);
4206   }
4207   return ExtOpnd;
4208 }
4209 
4210 /// Check whether or not promoting an instruction to a wider type is profitable.
4211 /// \p NewCost gives the cost of extension instructions created by the
4212 /// promotion.
/// \p OldCost gives the cost of extension instructions before the promotion
/// plus the number of instructions that have been matched in the addressing
/// mode thanks to the promotion.
4216 /// \p PromotedOperand is the value that has been promoted.
4217 /// \return True if the promotion is profitable, false otherwise.
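///
/// As a hypothetical illustration of the comparison below: if the promotion
/// creates one non-free extension (NewCost == 1) while removing one extension
/// and letting one extra instruction be folded into the addressing mode
/// (OldCost == 1 + 1 == 2), the promotion is considered profitable.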
4218 bool AddressingModeMatcher::isPromotionProfitable(
4219     unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const {
4220   LLVM_DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost
4221                     << '\n');
4222   // The cost of the new extensions is greater than the cost of the
4223   // old extension plus what we folded.
4224   // This is not profitable.
4225   if (NewCost > OldCost)
4226     return false;
4227   if (NewCost < OldCost)
4228     return true;
  // The promotion is neutral but it may help folding the sign extension into
  // loads, for instance.
4231   // Check that we did not create an illegal instruction.
4232   return isPromotedInstructionLegal(TLI, DL, PromotedOperand);
4233 }
4234 
4235 /// Given an instruction or constant expr, see if we can fold the operation
4236 /// into the addressing mode. If so, update the addressing mode and return
4237 /// true, otherwise return false without modifying AddrMode.
/// If \p MovedAway is not NULL, it contains information on whether or
/// not AddrInst has to be folded into the addressing mode on success.
/// If \p MovedAway == true, \p AddrInst will not be part of the addressing
/// mode because it has been moved away.
/// Thus AddrInst must not be added to the matched instructions.
4243 /// This state can happen when AddrInst is a sext, since it may be moved away.
4244 /// Therefore, AddrInst may not be valid when MovedAway is true and it must
4245 /// not be referenced anymore.
4246 bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode,
4247                                                unsigned Depth,
4248                                                bool *MovedAway) {
4249   // Avoid exponential behavior on extremely deep expression trees.
4250   if (Depth >= 5) return false;
4251 
4252   // By default, all matched instructions stay in place.
4253   if (MovedAway)
4254     *MovedAway = false;
4255 
4256   switch (Opcode) {
4257   case Instruction::PtrToInt:
4258     // PtrToInt is always a noop, as we know that the int type is pointer sized.
4259     return matchAddr(AddrInst->getOperand(0), Depth);
4260   case Instruction::IntToPtr: {
4261     auto AS = AddrInst->getType()->getPointerAddressSpace();
4262     auto PtrTy = MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
4263     // This inttoptr is a no-op if the integer type is pointer sized.
4264     if (TLI.getValueType(DL, AddrInst->getOperand(0)->getType()) == PtrTy)
4265       return matchAddr(AddrInst->getOperand(0), Depth);
4266     return false;
4267   }
4268   case Instruction::BitCast:
4269     // BitCast is always a noop, and we can handle it as long as it is
4270     // int->int or pointer->pointer (we don't want int<->fp or something).
4271     if (AddrInst->getOperand(0)->getType()->isIntOrPtrTy() &&
4272         // Don't touch identity bitcasts.  These were probably put here by LSR,
4273         // and we don't want to mess around with them.  Assume it knows what it
4274         // is doing.
4275         AddrInst->getOperand(0)->getType() != AddrInst->getType())
4276       return matchAddr(AddrInst->getOperand(0), Depth);
4277     return false;
4278   case Instruction::AddrSpaceCast: {
4279     unsigned SrcAS
4280       = AddrInst->getOperand(0)->getType()->getPointerAddressSpace();
4281     unsigned DestAS = AddrInst->getType()->getPointerAddressSpace();
4282     if (TLI.isNoopAddrSpaceCast(SrcAS, DestAS))
4283       return matchAddr(AddrInst->getOperand(0), Depth);
4284     return false;
4285   }
4286   case Instruction::Add: {
4287     // Check to see if we can merge in the RHS then the LHS.  If so, we win.
4288     ExtAddrMode BackupAddrMode = AddrMode;
4289     unsigned OldSize = AddrModeInsts.size();
4290     // Start a transaction at this point.
4291     // The LHS may match but not the RHS.
    // Therefore, we need a higher level restoration point to undo a partially
    // matched operation.
4294     TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4295         TPT.getRestorationPoint();
4296 
4297     AddrMode.InBounds = false;
4298     if (matchAddr(AddrInst->getOperand(1), Depth+1) &&
4299         matchAddr(AddrInst->getOperand(0), Depth+1))
4300       return true;
4301 
4302     // Restore the old addr mode info.
4303     AddrMode = BackupAddrMode;
4304     AddrModeInsts.resize(OldSize);
4305     TPT.rollback(LastKnownGood);
4306 
4307     // Otherwise this was over-aggressive.  Try merging in the LHS then the RHS.
4308     if (matchAddr(AddrInst->getOperand(0), Depth+1) &&
4309         matchAddr(AddrInst->getOperand(1), Depth+1))
4310       return true;
4311 
4312     // Otherwise we definitely can't merge the ADD in.
4313     AddrMode = BackupAddrMode;
4314     AddrModeInsts.resize(OldSize);
4315     TPT.rollback(LastKnownGood);
4316     break;
4317   }
4318   //case Instruction::Or:
4319   // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
4320   //break;
4321   case Instruction::Mul:
4322   case Instruction::Shl: {
4323     // Can only handle X*C and X << C.
4324     AddrMode.InBounds = false;
4325     ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
4326     if (!RHS || RHS->getBitWidth() > 64)
4327       return false;
4328     int64_t Scale = RHS->getSExtValue();
4329     if (Opcode == Instruction::Shl)
4330       Scale = 1LL << Scale;
4331 
4332     return matchScaledValue(AddrInst->getOperand(0), Scale, Depth);
4333   }
4334   case Instruction::GetElementPtr: {
    // Scan the GEP.  We check whether it contains constant offsets and at most
    // one variable offset.
4337     int VariableOperand = -1;
4338     unsigned VariableScale = 0;
4339 
4340     int64_t ConstantOffset = 0;
4341     gep_type_iterator GTI = gep_type_begin(AddrInst);
4342     for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
4343       if (StructType *STy = GTI.getStructTypeOrNull()) {
4344         const StructLayout *SL = DL.getStructLayout(STy);
4345         unsigned Idx =
4346           cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
4347         ConstantOffset += SL->getElementOffset(Idx);
4348       } else {
4349         uint64_t TypeSize = DL.getTypeAllocSize(GTI.getIndexedType());
4350         if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
4351           const APInt &CVal = CI->getValue();
4352           if (CVal.getMinSignedBits() <= 64) {
4353             ConstantOffset += CVal.getSExtValue() * TypeSize;
4354             continue;
4355           }
4356         }
4357         if (TypeSize) {  // Scales of zero don't do anything.
4358           // We only allow one variable index at the moment.
4359           if (VariableOperand != -1)
4360             return false;
4361 
4362           // Remember the variable index.
4363           VariableOperand = i;
4364           VariableScale = TypeSize;
4365         }
4366       }
4367     }
4368 
4369     // A common case is for the GEP to only do a constant offset.  In this case,
4370     // just add it to the disp field and check validity.
4371     if (VariableOperand == -1) {
4372       AddrMode.BaseOffs += ConstantOffset;
4373       if (ConstantOffset == 0 ||
4374           TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) {
4375         // Check to see if we can fold the base pointer in too.
4376         if (matchAddr(AddrInst->getOperand(0), Depth+1)) {
4377           if (!cast<GEPOperator>(AddrInst)->isInBounds())
4378             AddrMode.InBounds = false;
4379           return true;
4380         }
4381       } else if (EnableGEPOffsetSplit && isa<GetElementPtrInst>(AddrInst) &&
4382                  TLI.shouldConsiderGEPOffsetSplit() && Depth == 0 &&
4383                  ConstantOffset > 0) {
4384         // Record GEPs with non-zero offsets as candidates for splitting in the
4385         // event that the offset cannot fit into the r+i addressing mode.
4386         // Simple and common case that only one GEP is used in calculating the
4387         // address for the memory access.
4388         Value *Base = AddrInst->getOperand(0);
4389         auto *BaseI = dyn_cast<Instruction>(Base);
4390         auto *GEP = cast<GetElementPtrInst>(AddrInst);
4391         if (isa<Argument>(Base) || isa<GlobalValue>(Base) ||
4392             (BaseI && !isa<CastInst>(BaseI) &&
4393              !isa<GetElementPtrInst>(BaseI))) {
4394           // Make sure the parent block allows inserting non-PHI instructions
4395           // before the terminator.
4396           BasicBlock *Parent =
4397               BaseI ? BaseI->getParent() : &GEP->getFunction()->getEntryBlock();
4398           if (!Parent->getTerminator()->isEHPad())
4399             LargeOffsetGEP = std::make_pair(GEP, ConstantOffset);
4400         }
4401       }
4402       AddrMode.BaseOffs -= ConstantOffset;
4403       return false;
4404     }
4405 
4406     // Save the valid addressing mode in case we can't match.
4407     ExtAddrMode BackupAddrMode = AddrMode;
4408     unsigned OldSize = AddrModeInsts.size();
4409 
4410     // See if the scale and offset amount is valid for this target.
4411     AddrMode.BaseOffs += ConstantOffset;
4412     if (!cast<GEPOperator>(AddrInst)->isInBounds())
4413       AddrMode.InBounds = false;
4414 
4415     // Match the base operand of the GEP.
4416     if (!matchAddr(AddrInst->getOperand(0), Depth+1)) {
4417       // If it couldn't be matched, just stuff the value in a register.
4418       if (AddrMode.HasBaseReg) {
4419         AddrMode = BackupAddrMode;
4420         AddrModeInsts.resize(OldSize);
4421         return false;
4422       }
4423       AddrMode.HasBaseReg = true;
4424       AddrMode.BaseReg = AddrInst->getOperand(0);
4425     }
4426 
4427     // Match the remaining variable portion of the GEP.
4428     if (!matchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale,
4429                           Depth)) {
4430       // If it couldn't be matched, try stuffing the base into a register
4431       // instead of matching it, and retrying the match of the scale.
4432       AddrMode = BackupAddrMode;
4433       AddrModeInsts.resize(OldSize);
4434       if (AddrMode.HasBaseReg)
4435         return false;
4436       AddrMode.HasBaseReg = true;
4437       AddrMode.BaseReg = AddrInst->getOperand(0);
4438       AddrMode.BaseOffs += ConstantOffset;
4439       if (!matchScaledValue(AddrInst->getOperand(VariableOperand),
4440                             VariableScale, Depth)) {
4441         // If even that didn't work, bail.
4442         AddrMode = BackupAddrMode;
4443         AddrModeInsts.resize(OldSize);
4444         return false;
4445       }
4446     }
4447 
4448     return true;
4449   }
4450   case Instruction::SExt:
4451   case Instruction::ZExt: {
4452     Instruction *Ext = dyn_cast<Instruction>(AddrInst);
4453     if (!Ext)
4454       return false;
4455 
4456     // Try to move this ext out of the way of the addressing mode.
4457     // Ask for a method for doing so.
4458     TypePromotionHelper::Action TPH =
4459         TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts);
4460     if (!TPH)
4461       return false;
4462 
4463     TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4464         TPT.getRestorationPoint();
4465     unsigned CreatedInstsCost = 0;
4466     unsigned ExtCost = !TLI.isExtFree(Ext);
4467     Value *PromotedOperand =
4468         TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI);
4469     // SExt has been moved away.
4470     // Thus either it will be rematched later in the recursive calls or it is
4471     // gone. Anyway, we must not fold it into the addressing mode at this point.
4472     // E.g.,
4473     // op = add opnd, 1
4474     // idx = ext op
4475     // addr = gep base, idx
4476     // is now:
4477     // promotedOpnd = ext opnd            <- no match here
4478     // op = promoted_add promotedOpnd, 1  <- match (later in recursive calls)
4479     // addr = gep base, op                <- match
4480     if (MovedAway)
4481       *MovedAway = true;
4482 
4483     assert(PromotedOperand &&
4484            "TypePromotionHelper should have filtered out those cases");
4485 
4486     ExtAddrMode BackupAddrMode = AddrMode;
4487     unsigned OldSize = AddrModeInsts.size();
4488 
4489     if (!matchAddr(PromotedOperand, Depth) ||
4490         // The total of the new cost is equal to the cost of the created
4491         // instructions.
4492         // The total of the old cost is equal to the cost of the extension plus
4493         // what we have saved in the addressing mode.
4494         !isPromotionProfitable(CreatedInstsCost,
4495                                ExtCost + (AddrModeInsts.size() - OldSize),
4496                                PromotedOperand)) {
4497       AddrMode = BackupAddrMode;
4498       AddrModeInsts.resize(OldSize);
4499       LLVM_DEBUG(dbgs() << "Sign extension does not pay off: rollback\n");
4500       TPT.rollback(LastKnownGood);
4501       return false;
4502     }
4503     return true;
4504   }
4505   }
4506   return false;
4507 }
4508 
4509 /// If we can, try to add the value of 'Addr' into the current addressing mode.
4510 /// If Addr can't be added to AddrMode this returns false and leaves AddrMode
4511 /// unmodified. This assumes that Addr is either a pointer type or intptr_t
4512 /// for the target.
4513 ///
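/// For illustration (hypothetical IR, not tied to a particular target):
///     %addr = getelementptr i8, i8* %base, i64 40
///     %val  = load i8, i8* %addr
/// may be matched as base register %base plus immediate offset 40, provided
/// TLI.isLegalAddressingMode accepts [reg + imm] for this access type.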
4514 bool AddressingModeMatcher::matchAddr(Value *Addr, unsigned Depth) {
4515   // Start a transaction at this point that we will rollback if the matching
4516   // fails.
4517   TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4518       TPT.getRestorationPoint();
4519   if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
4520     if (CI->getValue().isSignedIntN(64)) {
4521       // Fold in immediates if legal for the target.
4522       AddrMode.BaseOffs += CI->getSExtValue();
4523       if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
4524         return true;
4525       AddrMode.BaseOffs -= CI->getSExtValue();
4526     }
4527   } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
4528     // If this is a global variable, try to fold it into the addressing mode.
4529     if (!AddrMode.BaseGV) {
4530       AddrMode.BaseGV = GV;
4531       if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
4532         return true;
4533       AddrMode.BaseGV = nullptr;
4534     }
4535   } else if (Instruction *I = dyn_cast<Instruction>(Addr)) {
4536     ExtAddrMode BackupAddrMode = AddrMode;
4537     unsigned OldSize = AddrModeInsts.size();
4538 
4539     // Check to see if it is possible to fold this operation.
4540     bool MovedAway = false;
4541     if (matchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) {
4542       // This instruction may have been moved away. If so, there is nothing
4543       // to check here.
4544       if (MovedAway)
4545         return true;
4546       // Okay, it's possible to fold this.  Check to see if it is actually
4547       // *profitable* to do so.  We use a simple cost model to avoid increasing
4548       // register pressure too much.
4549       if (I->hasOneUse() ||
4550           isProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) {
4551         AddrModeInsts.push_back(I);
4552         return true;
4553       }
4554 
4555       // It isn't profitable to do this, roll back.
4556       //cerr << "NOT FOLDING: " << *I;
4557       AddrMode = BackupAddrMode;
4558       AddrModeInsts.resize(OldSize);
4559       TPT.rollback(LastKnownGood);
4560     }
4561   } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
4562     if (matchOperationAddr(CE, CE->getOpcode(), Depth))
4563       return true;
4564     TPT.rollback(LastKnownGood);
4565   } else if (isa<ConstantPointerNull>(Addr)) {
4566     // Null pointer gets folded without affecting the addressing mode.
4567     return true;
4568   }
4569 
  // Worst case, the target should support [reg] addressing modes. :)
4571   if (!AddrMode.HasBaseReg) {
4572     AddrMode.HasBaseReg = true;
4573     AddrMode.BaseReg = Addr;
4574     // Still check for legality in case the target supports [imm] but not [i+r].
4575     if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
4576       return true;
4577     AddrMode.HasBaseReg = false;
4578     AddrMode.BaseReg = nullptr;
4579   }
4580 
4581   // If the base register is already taken, see if we can do [r+r].
4582   if (AddrMode.Scale == 0) {
4583     AddrMode.Scale = 1;
4584     AddrMode.ScaledReg = Addr;
4585     if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
4586       return true;
4587     AddrMode.Scale = 0;
4588     AddrMode.ScaledReg = nullptr;
4589   }
4590   // Couldn't match.
4591   TPT.rollback(LastKnownGood);
4592   return false;
4593 }
4594 
4595 /// Check to see if all uses of OpVal by the specified inline asm call are due
4596 /// to memory operands. If so, return true, otherwise return false.
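///
/// For illustration (hypothetical constraint string): a pointer reached only
/// through an indirect memory constraint such as "=*m" counts as a memory
/// operand here, whereas the same pointer appearing under a register
/// constraint such as "r" makes this helper return false.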
4597 static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
4598                                     const TargetLowering &TLI,
4599                                     const TargetRegisterInfo &TRI) {
4600   const Function *F = CI->getFunction();
4601   TargetLowering::AsmOperandInfoVector TargetConstraints =
4602       TLI.ParseConstraints(F->getParent()->getDataLayout(), &TRI, *CI);
4603 
4604   for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
4605     TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
4606 
4607     // Compute the constraint code and ConstraintType to use.
4608     TLI.ComputeConstraintToUse(OpInfo, SDValue());
4609 
4610     // If this asm operand is our Value*, and if it isn't an indirect memory
4611     // operand, we can't fold it!
4612     if (OpInfo.CallOperandVal == OpVal &&
4613         (OpInfo.ConstraintType != TargetLowering::C_Memory ||
4614          !OpInfo.isIndirect))
4615       return false;
4616   }
4617 
4618   return true;
4619 }
4620 
4621 // Max number of memory uses to look at before aborting the search to conserve
4622 // compile time.
4623 static constexpr int MaxMemoryUsesToScan = 20;
4624 
4625 /// Recursively walk all the uses of I until we find a memory use.
4626 /// If we find an obviously non-foldable instruction, return true.
4627 /// Add the ultimately found memory instructions to MemoryUses.
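///
/// For illustration (hypothetical IR):
///     store i32 0, i32* %p      ; %p is the address operand -> memory use
///     store i32* %p, i32** %q   ; %p is the stored value    -> not foldable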
4628 static bool FindAllMemoryUses(
4629     Instruction *I,
4630     SmallVectorImpl<std::pair<Instruction *, unsigned>> &MemoryUses,
4631     SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetLowering &TLI,
4632     const TargetRegisterInfo &TRI, bool OptSize, ProfileSummaryInfo *PSI,
4633     BlockFrequencyInfo *BFI, int SeenInsts = 0) {
4634   // If we already considered this instruction, we're done.
4635   if (!ConsideredInsts.insert(I).second)
4636     return false;
4637 
4638   // If this is an obviously unfoldable instruction, bail out.
4639   if (!MightBeFoldableInst(I))
4640     return true;
4641 
4642   // Loop over all the uses, recursively processing them.
4643   for (Use &U : I->uses()) {
4644     // Conservatively return true if we're seeing a large number or a deep chain
4645     // of users. This avoids excessive compilation times in pathological cases.
4646     if (SeenInsts++ >= MaxMemoryUsesToScan)
4647       return true;
4648 
4649     Instruction *UserI = cast<Instruction>(U.getUser());
4650     if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) {
4651       MemoryUses.push_back(std::make_pair(LI, U.getOperandNo()));
4652       continue;
4653     }
4654 
4655     if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
4656       unsigned opNo = U.getOperandNo();
4657       if (opNo != StoreInst::getPointerOperandIndex())
4658         return true; // Storing addr, not into addr.
4659       MemoryUses.push_back(std::make_pair(SI, opNo));
4660       continue;
4661     }
4662 
4663     if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UserI)) {
4664       unsigned opNo = U.getOperandNo();
4665       if (opNo != AtomicRMWInst::getPointerOperandIndex())
4666         return true; // Storing addr, not into addr.
4667       MemoryUses.push_back(std::make_pair(RMW, opNo));
4668       continue;
4669     }
4670 
4671     if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(UserI)) {
4672       unsigned opNo = U.getOperandNo();
4673       if (opNo != AtomicCmpXchgInst::getPointerOperandIndex())
4674         return true; // Storing addr, not into addr.
4675       MemoryUses.push_back(std::make_pair(CmpX, opNo));
4676       continue;
4677     }
4678 
4679     if (CallInst *CI = dyn_cast<CallInst>(UserI)) {
4680       if (CI->hasFnAttr(Attribute::Cold)) {
        // If this is a cold call, we can sink the addressing calculation into
        // the cold path.  See optimizeCallInst.
4683         bool OptForSize = OptSize ||
4684           llvm::shouldOptimizeForSize(CI->getParent(), PSI, BFI);
4685         if (!OptForSize)
4686           continue;
4687       }
4688 
4689       InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledOperand());
4690       if (!IA) return true;
4691 
4692       // If this is a memory operand, we're cool, otherwise bail out.
4693       if (!IsOperandAMemoryOperand(CI, IA, I, TLI, TRI))
4694         return true;
4695       continue;
4696     }
4697 
4698     if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI, TRI, OptSize,
4699                           PSI, BFI, SeenInsts))
4700       return true;
4701   }
4702 
4703   return false;
4704 }
4705 
4706 /// Return true if Val is already known to be live at the use site that we're
4707 /// folding it into. If so, there is no cost to include it in the addressing
4708 /// mode. KnownLive1 and KnownLive2 are two values that we know are live at the
4709 /// instruction already.
bool AddressingModeMatcher::valueAlreadyLiveAtInst(Value *Val,
                                                   Value *KnownLive1,
                                                   Value *KnownLive2) {
4712   // If Val is either of the known-live values, we know it is live!
4713   if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2)
4714     return true;
4715 
4716   // All values other than instructions and arguments (e.g. constants) are live.
4717   if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true;
4718 
  // If Val is a constant-sized alloca in the entry block, it is live; this is
  // because it is just a reference to the stack/frame pointer, which is live
  // for the whole function.
4722   if (AllocaInst *AI = dyn_cast<AllocaInst>(Val))
4723     if (AI->isStaticAlloca())
4724       return true;
4725 
4726   // Check to see if this value is already used in the memory instruction's
4727   // block.  If so, it's already live into the block at the very least, so we
4728   // can reasonably fold it.
4729   return Val->isUsedInBasicBlock(MemoryInst->getParent());
4730 }
4731 
4732 /// It is possible for the addressing mode of the machine to fold the specified
4733 /// instruction into a load or store that ultimately uses it.
4734 /// However, the specified instruction has multiple uses.
4735 /// Given this, it may actually increase register pressure to fold it
4736 /// into the load. For example, consider this code:
4737 ///
4738 ///     X = ...
4739 ///     Y = X+1
4740 ///     use(Y)   -> nonload/store
4741 ///     Z = Y+1
4742 ///     load Z
4743 ///
4744 /// In this case, Y has multiple uses, and can be folded into the load of Z
4745 /// (yielding load [X+2]).  However, doing this will cause both "X" and "X+1" to
4746 /// be live at the use(Y) line.  If we don't fold Y into load Z, we use one
4747 /// fewer register.  Since Y can't be folded into "use(Y)" we don't increase the
4748 /// number of computations either.
4749 ///
4750 /// Note that this (like most of CodeGenPrepare) is just a rough heuristic.  If
4751 /// X was live across 'load Z' for other reasons, we actually *would* want to
4752 /// fold the addressing mode in the Z case.  This would make Y die earlier.
4753 bool AddressingModeMatcher::
4754 isProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
4755                                      ExtAddrMode &AMAfter) {
4756   if (IgnoreProfitability) return true;
4757 
4758   // AMBefore is the addressing mode before this instruction was folded into it,
4759   // and AMAfter is the addressing mode after the instruction was folded.  Get
4760   // the set of registers referenced by AMAfter and subtract out those
4761   // referenced by AMBefore: this is the set of values which folding in this
4762   // address extends the lifetime of.
4763   //
4764   // Note that there are only two potential values being referenced here,
4765   // BaseReg and ScaleReg (global addresses are always available, as are any
4766   // folded immediates).
4767   Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;
4768 
4769   // If the BaseReg or ScaledReg was referenced by the previous addrmode, their
4770   // lifetime wasn't extended by adding this instruction.
4771   if (valueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
4772     BaseReg = nullptr;
4773   if (valueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
4774     ScaledReg = nullptr;
4775 
  // If folding this instruction (and its subexprs) didn't extend any live
4777   // ranges, we're ok with it.
4778   if (!BaseReg && !ScaledReg)
4779     return true;
4780 
4781   // If all uses of this instruction can have the address mode sunk into them,
4782   // we can remove the addressing mode and effectively trade one live register
4783   // for another (at worst.)  In this context, folding an addressing mode into
4784   // the use is just a particularly nice way of sinking it.
4785   SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses;
4786   SmallPtrSet<Instruction*, 16> ConsideredInsts;
4787   if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI, TRI, OptSize,
4788                         PSI, BFI))
4789     return false;  // Has a non-memory, non-foldable use!
4790 
4791   // Now that we know that all uses of this instruction are part of a chain of
4792   // computation involving only operations that could theoretically be folded
4793   // into a memory use, loop over each of these memory operation uses and see
  // if they could *actually* fold the instruction.  The assumption is that
  // addressing modes are cheap and that duplicating the computation involved
  // many times is worthwhile, even on a fast path. For sinking candidates
  // (i.e. cold call sites), this serves as a way to prevent excessive code
  // growth since most architectures have some reasonably small and fast way to
  // compute an effective address (e.g., LEA on x86).
4800   SmallVector<Instruction*, 32> MatchedAddrModeInsts;
4801   for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) {
4802     Instruction *User = MemoryUses[i].first;
4803     unsigned OpNo = MemoryUses[i].second;
4804 
4805     // Get the access type of this use.  If the use isn't a pointer, we don't
4806     // know what it accesses.
4807     Value *Address = User->getOperand(OpNo);
4808     PointerType *AddrTy = dyn_cast<PointerType>(Address->getType());
4809     if (!AddrTy)
4810       return false;
4811     Type *AddressAccessTy = AddrTy->getElementType();
4812     unsigned AS = AddrTy->getAddressSpace();
4813 
4814     // Do a match against the root of this address, ignoring profitability. This
4815     // will tell us if the addressing mode for the memory operation will
4816     // *actually* cover the shared instruction.
4817     ExtAddrMode Result;
4818     std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr,
4819                                                                       0);
4820     TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4821         TPT.getRestorationPoint();
4822     AddressingModeMatcher Matcher(
4823         MatchedAddrModeInsts, TLI, TRI, AddressAccessTy, AS, MemoryInst, Result,
4824         InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP, OptSize, PSI, BFI);
4825     Matcher.IgnoreProfitability = true;
4826     bool Success = Matcher.matchAddr(Address, 0);
4827     (void)Success; assert(Success && "Couldn't select *anything*?");
4828 
    // The match was only to check profitability; the changes made are not
    // part of the original matcher. Therefore, they should be dropped,
    // otherwise the original matcher will not represent the right state.
4832     TPT.rollback(LastKnownGood);
4833 
4834     // If the match didn't cover I, then it won't be shared by it.
4835     if (!is_contained(MatchedAddrModeInsts, I))
4836       return false;
4837 
4838     MatchedAddrModeInsts.clear();
4839   }
4840 
4841   return true;
4842 }
4843 
4844 /// Return true if the specified values are defined in a
4845 /// different basic block than BB.
4846 static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
4847   if (Instruction *I = dyn_cast<Instruction>(V))
4848     return I->getParent() != BB;
4849   return false;
4850 }
4851 
/// Sink addressing mode computation immediately before MemoryInst if doing so
4853 /// can be done without increasing register pressure.  The need for the
4854 /// register pressure constraint means this can end up being an all or nothing
4855 /// decision for all uses of the same addressing computation.
4856 ///
4857 /// Load and Store Instructions often have addressing modes that can do
4858 /// significant amounts of computation. As such, instruction selection will try
4859 /// to get the load or store to do as much computation as possible for the
4860 /// program. The problem is that isel can only see within a single block. As
4861 /// such, we sink as much legal addressing mode work into the block as possible.
4862 ///
4863 /// This method is used to optimize both load/store and inline asms with memory
4864 /// operands.  It's also used to sink addressing computations feeding into cold
4865 /// call sites into their (cold) basic block.
4866 ///
4867 /// The motivation for handling sinking into cold blocks is that doing so can
4868 /// both enable other address mode sinking (by satisfying the register pressure
4869 /// constraint above), and reduce register pressure globally (by removing the
/// addressing mode computation from the fast path entirely).
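///
/// For illustration (hypothetical IR): if
///     %addr = getelementptr i32, i32* %base, i64 %idx
/// is computed in one block but only used by a load in another block, an
/// equivalent address computation is re-emitted right before the load so that
/// instruction selection can fold it into the load's addressing mode.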
4871 bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
4872                                         Type *AccessTy, unsigned AddrSpace) {
4873   Value *Repl = Addr;
4874 
4875   // Try to collapse single-value PHI nodes.  This is necessary to undo
4876   // unprofitable PRE transformations.
4877   SmallVector<Value*, 8> worklist;
4878   SmallPtrSet<Value*, 16> Visited;
4879   worklist.push_back(Addr);
4880 
  // Use a worklist to iteratively look through PHI and select nodes, and
  // ensure that the addressing modes obtained from the non-PHI/select roots
  // of the graph are compatible.
4884   bool PhiOrSelectSeen = false;
4885   SmallVector<Instruction*, 16> AddrModeInsts;
4886   const SimplifyQuery SQ(*DL, TLInfo);
4887   AddressingModeCombiner AddrModes(SQ, Addr);
4888   TypePromotionTransaction TPT(RemovedInsts);
4889   TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4890       TPT.getRestorationPoint();
4891   while (!worklist.empty()) {
4892     Value *V = worklist.back();
4893     worklist.pop_back();
4894 
    // We allow traversing cyclic PHI nodes.
    // In case of success after this loop, we ensure that every case reached by
    // traversing through the PHI nodes computes an address of the form
    //    BaseGV + Base + Scale * Index + Offset
    // where Scale and Offset are constants and BaseGV, Base and Index
    // are exactly the same Values in all cases.
    // It means that BaseGV, Scale and Offset dominate our memory instruction
    // and have the same value as they had in the address computation
    // represented as a PHI. So we can safely sink the address computation to
    // the memory instruction.
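    //
    // For illustration (hypothetical values): if every path through the PHI
    // computes %base + 4 * %idx + 16, the sunk computation can use
    // Base = %base, Scale = 4, Index = %idx and Offset = 16.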
4904     if (!Visited.insert(V).second)
4905       continue;
4906 
4907     // For a PHI node, push all of its incoming values.
4908     if (PHINode *P = dyn_cast<PHINode>(V)) {
4909       for (Value *IncValue : P->incoming_values())
4910         worklist.push_back(IncValue);
4911       PhiOrSelectSeen = true;
4912       continue;
4913     }
4914     // Similar for select.
4915     if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
4916       worklist.push_back(SI->getFalseValue());
4917       worklist.push_back(SI->getTrueValue());
4918       PhiOrSelectSeen = true;
4919       continue;
4920     }
4921 
4922     // For non-PHIs, determine the addressing mode being computed.  Note that
4923     // the result may differ depending on what other uses our candidate
4924     // addressing instructions might have.
4925     AddrModeInsts.clear();
4926     std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr,
4927                                                                       0);
4928     ExtAddrMode NewAddrMode = AddressingModeMatcher::Match(
4929         V, AccessTy, AddrSpace, MemoryInst, AddrModeInsts, *TLI, *TRI,
4930         InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP, OptSize, PSI,
4931         BFI.get());
4932 
4933     GetElementPtrInst *GEP = LargeOffsetGEP.first;
4934     if (GEP && !NewGEPBases.count(GEP)) {
4935       // If splitting the underlying data structure can reduce the offset of a
4936       // GEP, collect the GEP.  Skip the GEPs that are the new bases of
4937       // previously split data structures.
4938       LargeOffsetGEPMap[GEP->getPointerOperand()].push_back(LargeOffsetGEP);
4939       if (LargeOffsetGEPID.find(GEP) == LargeOffsetGEPID.end())
4940         LargeOffsetGEPID[GEP] = LargeOffsetGEPID.size();
4941     }
4942 
4943     NewAddrMode.OriginalValue = V;
4944     if (!AddrModes.addNewAddrMode(NewAddrMode))
4945       break;
4946   }
4947 
4948   // Try to combine the AddrModes we've collected. If we couldn't collect any,
4949   // or we have multiple but either couldn't combine them or combining them
4950   // wouldn't do anything useful, bail out now.
4951   if (!AddrModes.combineAddrModes()) {
4952     TPT.rollback(LastKnownGood);
4953     return false;
4954   }
4955   TPT.commit();
4956 
4957   // Get the combined AddrMode (or the only AddrMode, if we only had one).
4958   ExtAddrMode AddrMode = AddrModes.getAddrMode();
4959 
4960   // If all the instructions matched are already in this BB, don't do anything.
  // If we saw a PHI node then it is definitely not local, and if we saw a
  // select then we want to push the address calculation past it even if it's
  // already in this BB.
4964   if (!PhiOrSelectSeen && none_of(AddrModeInsts, [&](Value *V) {
4965         return IsNonLocalValue(V, MemoryInst->getParent());
4966                   })) {
4967     LLVM_DEBUG(dbgs() << "CGP: Found      local addrmode: " << AddrMode
4968                       << "\n");
4969     return false;
4970   }
4971 
4972   // Insert this computation right after this user.  Since our caller is
  // scanning from the top of the BB to the bottom, reuses of the expr are
4974   // guaranteed to happen later.
4975   IRBuilder<> Builder(MemoryInst);
4976 
  // Now that we've determined the addressing expression we want to use and
  // know that we have to sink it into this block, check to see if we have
  // already done this for some other load/store instr in this block.  If so,
  // reuse the computation.  Before attempting reuse, check if the address is
  // valid as it may have been erased.
4982 
4983   WeakTrackingVH SunkAddrVH = SunkAddrs[Addr];
4984 
  Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
4986   if (SunkAddr) {
4987     LLVM_DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode
4988                       << " for " << *MemoryInst << "\n");
4989     if (SunkAddr->getType() != Addr->getType())
4990       SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
4991   } else if (AddrSinkUsingGEPs || (!AddrSinkUsingGEPs.getNumOccurrences() &&
4992                                    SubtargetInfo->addrSinkUsingGEPs())) {
4993     // By default, we use the GEP-based method when AA is used later. This
4994     // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities.
4995     LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
4996                       << " for " << *MemoryInst << "\n");
4997     Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
4998     Value *ResultPtr = nullptr, *ResultIndex = nullptr;
4999 
5000     // First, find the pointer.
5001     if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) {
5002       ResultPtr = AddrMode.BaseReg;
5003       AddrMode.BaseReg = nullptr;
5004     }
5005 
5006     if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) {
5007       // We can't add more than one pointer together, nor can we scale a
5008       // pointer (both of which seem meaningless).
5009       if (ResultPtr || AddrMode.Scale != 1)
5010         return false;
5011 
5012       ResultPtr = AddrMode.ScaledReg;
5013       AddrMode.Scale = 0;
5014     }
5015 
5016     // It is only safe to sign extend the BaseReg if we know that the math
5017     // required to create it did not overflow before we extend it. Since
5018     // the original IR value was tossed in favor of a constant back when
5019     // the AddrMode was created we need to bail out gracefully if widths
5020     // do not match instead of extending it.
5021     //
5022     // (See below for code to add the scale.)
5023     if (AddrMode.Scale) {
5024       Type *ScaledRegTy = AddrMode.ScaledReg->getType();
5025       if (cast<IntegerType>(IntPtrTy)->getBitWidth() >
5026           cast<IntegerType>(ScaledRegTy)->getBitWidth())
5027         return false;
5028     }
5029 
5030     if (AddrMode.BaseGV) {
5031       if (ResultPtr)
5032         return false;
5033 
5034       ResultPtr = AddrMode.BaseGV;
5035     }
5036 
5037     // If the real base value actually came from an inttoptr, then the matcher
5038     // will look through it and provide only the integer value. In that case,
5039     // use it here.
5040     if (!DL->isNonIntegralPointerType(Addr->getType())) {
5041       if (!ResultPtr && AddrMode.BaseReg) {
5042         ResultPtr = Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(),
5043                                            "sunkaddr");
5044         AddrMode.BaseReg = nullptr;
5045       } else if (!ResultPtr && AddrMode.Scale == 1) {
5046         ResultPtr = Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(),
5047                                            "sunkaddr");
5048         AddrMode.Scale = 0;
5049       }
5050     }
5051 
5052     if (!ResultPtr &&
5053         !AddrMode.BaseReg && !AddrMode.Scale && !AddrMode.BaseOffs) {
5054       SunkAddr = Constant::getNullValue(Addr->getType());
5055     } else if (!ResultPtr) {
5056       return false;
5057     } else {
5058       Type *I8PtrTy =
5059           Builder.getInt8PtrTy(Addr->getType()->getPointerAddressSpace());
5060       Type *I8Ty = Builder.getInt8Ty();
5061 
5062       // Start with the base register. Do this first so that subsequent address
5063       // matching finds it last, which will prevent it from trying to match it
5064       // as the scaled value in case it happens to be a mul. That would be
5065       // problematic if we've sunk a different mul for the scale, because then
5066       // we'd end up sinking both muls.
5067       if (AddrMode.BaseReg) {
5068         Value *V = AddrMode.BaseReg;
5069         if (V->getType() != IntPtrTy)
5070           V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
5071 
5072         ResultIndex = V;
5073       }
5074 
5075       // Add the scale value.
5076       if (AddrMode.Scale) {
5077         Value *V = AddrMode.ScaledReg;
5078         if (V->getType() == IntPtrTy) {
5079           // done.
5080         } else {
5081           assert(cast<IntegerType>(IntPtrTy)->getBitWidth() <
5082                  cast<IntegerType>(V->getType())->getBitWidth() &&
5083                  "We can't transform if ScaledReg is too narrow");
5084           V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
5085         }
5086 
5087         if (AddrMode.Scale != 1)
5088           V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
5089                                 "sunkaddr");
5090         if (ResultIndex)
5091           ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr");
5092         else
5093           ResultIndex = V;
5094       }
5095 
5096       // Add in the Base Offset if present.
5097       if (AddrMode.BaseOffs) {
5098         Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
5099         if (ResultIndex) {
5100           // We need to add this separately from the scale above to help with
5101           // SDAG consecutive load/store merging.
5102           if (ResultPtr->getType() != I8PtrTy)
5103             ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy);
5104           ResultPtr =
5105               AddrMode.InBounds
5106                   ? Builder.CreateInBoundsGEP(I8Ty, ResultPtr, ResultIndex,
5107                                               "sunkaddr")
5108                   : Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr");
5109         }
5110 
5111         ResultIndex = V;
5112       }
5113 
5114       if (!ResultIndex) {
5115         SunkAddr = ResultPtr;
5116       } else {
5117         if (ResultPtr->getType() != I8PtrTy)
5118           ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy);
5119         SunkAddr =
5120             AddrMode.InBounds
5121                 ? Builder.CreateInBoundsGEP(I8Ty, ResultPtr, ResultIndex,
5122                                             "sunkaddr")
5123                 : Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr");
5124       }
5125 
5126       if (SunkAddr->getType() != Addr->getType())
5127         SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
5128     }
5129   } else {
5130     // We'd require a ptrtoint/inttoptr down the line, which we can't do for
5131     // non-integral pointers, so in that case bail out now.
5132     Type *BaseTy = AddrMode.BaseReg ? AddrMode.BaseReg->getType() : nullptr;
5133     Type *ScaleTy = AddrMode.Scale ? AddrMode.ScaledReg->getType() : nullptr;
5134     PointerType *BasePtrTy = dyn_cast_or_null<PointerType>(BaseTy);
5135     PointerType *ScalePtrTy = dyn_cast_or_null<PointerType>(ScaleTy);
5136     if (DL->isNonIntegralPointerType(Addr->getType()) ||
5137         (BasePtrTy && DL->isNonIntegralPointerType(BasePtrTy)) ||
5138         (ScalePtrTy && DL->isNonIntegralPointerType(ScalePtrTy)) ||
5139         (AddrMode.BaseGV &&
5140          DL->isNonIntegralPointerType(AddrMode.BaseGV->getType())))
5141       return false;
5142 
5143     LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
5144                       << " for " << *MemoryInst << "\n");
5145     Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
5146     Value *Result = nullptr;
5147 
5148     // Start with the base register. Do this first so that subsequent address
5149     // matching finds it last, which will prevent it from trying to match it
5150     // as the scaled value in case it happens to be a mul. That would be
5151     // problematic if we've sunk a different mul for the scale, because then
5152     // we'd end up sinking both muls.
5153     if (AddrMode.BaseReg) {
5154       Value *V = AddrMode.BaseReg;
5155       if (V->getType()->isPointerTy())
5156         V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
5157       if (V->getType() != IntPtrTy)
5158         V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
5159       Result = V;
5160     }
5161 
5162     // Add the scale value.
5163     if (AddrMode.Scale) {
5164       Value *V = AddrMode.ScaledReg;
5165       if (V->getType() == IntPtrTy) {
5166         // done.
5167       } else if (V->getType()->isPointerTy()) {
5168         V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
5169       } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
5170                  cast<IntegerType>(V->getType())->getBitWidth()) {
5171         V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
5172       } else {
5173         // It is only safe to sign extend the BaseReg if we know that the math
5174         // required to create it did not overflow before we extend it. Since
5175         // the original IR value was tossed in favor of a constant back when
5176         // the AddrMode was created we need to bail out gracefully if widths
5177         // do not match instead of extending it.
5178         Instruction *I = dyn_cast_or_null<Instruction>(Result);
5179         if (I && (Result != AddrMode.BaseReg))
5180           I->eraseFromParent();
5181         return false;
5182       }
5183       if (AddrMode.Scale != 1)
5184         V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
5185                               "sunkaddr");
5186       if (Result)
5187         Result = Builder.CreateAdd(Result, V, "sunkaddr");
5188       else
5189         Result = V;
5190     }
5191 
5192     // Add in the BaseGV if present.
5193     if (AddrMode.BaseGV) {
5194       Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr");
5195       if (Result)
5196         Result = Builder.CreateAdd(Result, V, "sunkaddr");
5197       else
5198         Result = V;
5199     }
5200 
5201     // Add in the Base Offset if present.
5202     if (AddrMode.BaseOffs) {
5203       Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
5204       if (Result)
5205         Result = Builder.CreateAdd(Result, V, "sunkaddr");
5206       else
5207         Result = V;
5208     }
5209 
5210     if (!Result)
5211       SunkAddr = Constant::getNullValue(Addr->getType());
5212     else
5213       SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr");
5214   }
5215 
5216   MemoryInst->replaceUsesOfWith(Repl, SunkAddr);
5217   // Store the newly computed address into the cache. In the case we reused a
5218   // value, this should be idempotent.
5219   SunkAddrs[Addr] = WeakTrackingVH(SunkAddr);
5220 
5221   // If we have no uses, recursively delete the value and all dead instructions
5222   // using it.
5223   if (Repl->use_empty()) {
5224     // This can cause recursive deletion, which can invalidate our iterator.
5225     // Use a WeakTrackingVH to hold onto it in case this happens.
5226     Value *CurValue = &*CurInstIterator;
5227     WeakTrackingVH IterHandle(CurValue);
5228     BasicBlock *BB = CurInstIterator->getParent();
5229 
5230     RecursivelyDeleteTriviallyDeadInstructions(Repl, TLInfo);
5231 
5232     if (IterHandle != CurValue) {
5233       // If the iterator instruction was recursively deleted, start over at the
5234       // start of the block.
5235       CurInstIterator = BB->begin();
5236       SunkAddrs.clear();
5237     }
5238   }
5239   ++NumMemoryInsts;
5240   return true;
5241 }
5242 
5243 /// Rewrite GEP input to gather/scatter to enable SelectionDAGBuilder to find
5244 /// a uniform base to use for ISD::MGATHER/MSCATTER. SelectionDAGBuilder can
5245 /// only handle a 2 operand GEP in the same basic block or a splat constant
5246 /// vector. The 2 operands to the GEP must have a scalar pointer and a vector
5247 /// index.
5248 ///
5249 /// If the existing GEP has a vector base pointer that is splat, we can look
5250 /// through the splat to find the scalar pointer. If we can't find a scalar
5251 /// pointer there's nothing we can do.
5252 ///
5253 /// If we have a GEP with more than 2 indices where the middle indices are all
5254 /// zeroes, we can replace it with 2 GEPs where the second has 2 operands.
5255 ///
5256 /// If the final index isn't a vector or is a splat, we can emit a scalar GEP
5257 /// followed by a GEP with an all zeroes vector index. This will enable
5258 /// SelectionDAGBuilder to use the scalar GEP as the uniform base and have a
5259 /// zero index.
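///
/// As a rough illustration only (hypothetical IR, not taken from a test), a
/// gather whose pointer operand is built from a splat base:
///   %ins   = insertelement <4 x i32*> undef, i32* %base, i32 0
///   %splat = shufflevector <4 x i32*> %ins, <4 x i32*> undef, <4 x i32> zeroinitializer
///   %gep   = getelementptr i32, <4 x i32*> %splat, <4 x i64> %index
/// would be rewritten to use the scalar pointer directly:
///   %gep   = getelementptr i32, i32* %base, <4 x i64> %index
/// so that SelectionDAGBuilder can recognize %base as the uniform base.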
5260 bool CodeGenPrepare::optimizeGatherScatterInst(Instruction *MemoryInst,
5261                                                Value *Ptr) {
5262   const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
5263   if (!GEP || !GEP->hasIndices())
5264     return false;
5265 
5266   // If the GEP and the gather/scatter aren't in the same BB, don't optimize.
5267   // FIXME: We should support this by sinking the GEP.
5268   if (MemoryInst->getParent() != GEP->getParent())
5269     return false;
5270 
5271   SmallVector<Value *, 2> Ops(GEP->op_begin(), GEP->op_end());
5272 
5273   bool RewriteGEP = false;
5274 
5275   if (Ops[0]->getType()->isVectorTy()) {
5276     Ops[0] = const_cast<Value *>(getSplatValue(Ops[0]));
5277     if (!Ops[0])
5278       return false;
5279     RewriteGEP = true;
5280   }
5281 
5282   unsigned FinalIndex = Ops.size() - 1;
5283 
5284   // Ensure all but the last index are 0.
5285   // FIXME: This isn't strictly required. All that's required is that they are
5286   // all scalars or splats.
5287   for (unsigned i = 1; i < FinalIndex; ++i) {
5288     auto *C = dyn_cast<Constant>(Ops[i]);
5289     if (!C)
5290       return false;
5291     if (isa<VectorType>(C->getType()))
5292       C = C->getSplatValue();
5293     auto *CI = dyn_cast_or_null<ConstantInt>(C);
5294     if (!CI || !CI->isZero())
5295       return false;
5296     // Scalarize the index if needed.
5297     Ops[i] = CI;
5298   }
5299 
5300   // Try to scalarize the final index.
5301   if (Ops[FinalIndex]->getType()->isVectorTy()) {
5302     if (Value *V = const_cast<Value *>(getSplatValue(Ops[FinalIndex]))) {
5303       auto *C = dyn_cast<ConstantInt>(V);
5304       // Don't scalarize an all-zeroes vector.
5305       if (!C || !C->isZero()) {
5306         Ops[FinalIndex] = V;
5307         RewriteGEP = true;
5308       }
5309     }
5310   }
5311 
5312   // If we made any changes or we have extra operands, we need to generate
5313   // new instructions.
5314   if (!RewriteGEP && Ops.size() == 2)
5315     return false;
5316 
5317   unsigned NumElts = cast<VectorType>(Ptr->getType())->getNumElements();
5318 
5319   IRBuilder<> Builder(MemoryInst);
5320 
5321   Type *ScalarIndexTy = DL->getIndexType(Ops[0]->getType()->getScalarType());
5322 
5323   Value *NewAddr;
5324 
5325   // If the final index isn't a vector, emit a scalar GEP containing all ops
5326   // and a vector GEP with an all-zeroes final index.
5327   if (!Ops[FinalIndex]->getType()->isVectorTy()) {
5328     NewAddr = Builder.CreateGEP(Ops[0], makeArrayRef(Ops).drop_front());
5329     auto *IndexTy = FixedVectorType::get(ScalarIndexTy, NumElts);
5330     NewAddr = Builder.CreateGEP(NewAddr, Constant::getNullValue(IndexTy));
5331   } else {
5332     Value *Base = Ops[0];
5333     Value *Index = Ops[FinalIndex];
5334 
5335     // Create a scalar GEP if there are more than 2 operands.
5336     if (Ops.size() != 2) {
5337       // Replace the last index with 0.
5338       Ops[FinalIndex] = Constant::getNullValue(ScalarIndexTy);
5339       Base = Builder.CreateGEP(Base, makeArrayRef(Ops).drop_front());
5340     }
5341 
5342     // Now create the GEP with scalar pointer and vector index.
5343     NewAddr = Builder.CreateGEP(Base, Index);
5344   }
5345 
5346   MemoryInst->replaceUsesOfWith(Ptr, NewAddr);
5347 
5348   // If we have no uses, recursively delete the value and all dead instructions
5349   // using it.
5350   if (Ptr->use_empty())
5351     RecursivelyDeleteTriviallyDeadInstructions(Ptr, TLInfo);
5352 
5353   return true;
5354 }
5355 
5356 /// If there are any memory operands, use OptimizeMemoryInst to sink their
5357 /// address computation into the block when possible / profitable.
5358 bool CodeGenPrepare::optimizeInlineAsmInst(CallInst *CS) {
5359   bool MadeChange = false;
5360 
5361   const TargetRegisterInfo *TRI =
5362       TM->getSubtargetImpl(*CS->getFunction())->getRegisterInfo();
5363   TargetLowering::AsmOperandInfoVector TargetConstraints =
5364       TLI->ParseConstraints(*DL, TRI, *CS);
5365   unsigned ArgNo = 0;
5366   for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
5367     TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
5368 
5369     // Compute the constraint code and ConstraintType to use.
5370     TLI->ComputeConstraintToUse(OpInfo, SDValue());
5371 
5372     if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
5373         OpInfo.isIndirect) {
5374       Value *OpVal = CS->getArgOperand(ArgNo++);
5375       MadeChange |= optimizeMemoryInst(CS, OpVal, OpVal->getType(), ~0u);
5376     } else if (OpInfo.Type == InlineAsm::isInput)
5377       ArgNo++;
5378   }
5379 
5380   return MadeChange;
5381 }
5382 
5383 /// Check if all the uses of \p Val are equivalent (or free) zero or
5384 /// sign extensions.
5385 static bool hasSameExtUse(Value *Val, const TargetLowering &TLI) {
5386   assert(!Val->use_empty() && "Input must have at least one use");
5387   const Instruction *FirstUser = cast<Instruction>(*Val->user_begin());
5388   bool IsSExt = isa<SExtInst>(FirstUser);
5389   Type *ExtTy = FirstUser->getType();
5390   for (const User *U : Val->users()) {
5391     const Instruction *UI = cast<Instruction>(U);
5392     if ((IsSExt && !isa<SExtInst>(UI)) || (!IsSExt && !isa<ZExtInst>(UI)))
5393       return false;
5394     Type *CurTy = UI->getType();
5395     // Same input and output types: Same instruction after CSE.
5396     if (CurTy == ExtTy)
5397       continue;
5398 
5399     // If IsSExt is true, we are in this situation:
5400     // a = Val
5401     // b = sext ty1 a to ty2
5402     // c = sext ty1 a to ty3
5403     // Assuming ty2 is shorter than ty3, this could be turned into:
5404     // a = Val
5405     // b = sext ty1 a to ty2
5406     // c = sext ty2 b to ty3
5407     // However, the last sext is not free.
5408     if (IsSExt)
5409       return false;
5410 
5411     // This is a ZExt; maybe it is free to extend from one type to another.
5412     // In that case, we would not account for a different use.
5413     Type *NarrowTy;
5414     Type *LargeTy;
5415     if (ExtTy->getScalarType()->getIntegerBitWidth() >
5416         CurTy->getScalarType()->getIntegerBitWidth()) {
5417       NarrowTy = CurTy;
5418       LargeTy = ExtTy;
5419     } else {
5420       NarrowTy = ExtTy;
5421       LargeTy = CurTy;
5422     }
5423 
5424     if (!TLI.isZExtFree(NarrowTy, LargeTy))
5425       return false;
5426   }
5427   // All uses are the same or can be derived from one another for free.
5428   return true;
5429 }
5430 
5431 /// Try to speculatively promote extensions in \p Exts and continue
5432 /// promoting through newly promoted operands recursively as far as doing so is
5433 /// profitable. Save extensions profitably moved up, in \p ProfitablyMovedExts.
5434 /// When some promotion happened, \p TPT contains the proper state to revert
5435 /// them.
5436 ///
5437 /// \return true if some promotion happened, false otherwise.
5438 bool CodeGenPrepare::tryToPromoteExts(
5439     TypePromotionTransaction &TPT, const SmallVectorImpl<Instruction *> &Exts,
5440     SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
5441     unsigned CreatedInstsCost) {
5442   bool Promoted = false;
5443 
5444   // Iterate over all the extensions to try to promote them.
5445   for (auto *I : Exts) {
5446     // Early check if we directly have ext(load).
5447     if (isa<LoadInst>(I->getOperand(0))) {
5448       ProfitablyMovedExts.push_back(I);
5449       continue;
5450     }
5451 
5452     // Check whether or not we want to do any promotion.  The reason we have
5453     // this check inside the for loop is to catch the case where an extension
5454     // is directly fed by a load, because in such a case the extension can be
5455     // moved up without any promotion on its operands.
5456     if (!TLI->enableExtLdPromotion() || DisableExtLdPromotion)
5457       return false;
5458 
5459     // Get the action to perform the promotion.
5460     TypePromotionHelper::Action TPH =
5461         TypePromotionHelper::getAction(I, InsertedInsts, *TLI, PromotedInsts);
5462     // Check if we can promote.
5463     if (!TPH) {
5464       // Save the current extension as we cannot move up through its operand.
5465       ProfitablyMovedExts.push_back(I);
5466       continue;
5467     }
5468 
5469     // Save the current state.
5470     TypePromotionTransaction::ConstRestorationPt LastKnownGood =
5471         TPT.getRestorationPoint();
5472     SmallVector<Instruction *, 4> NewExts;
5473     unsigned NewCreatedInstsCost = 0;
5474     unsigned ExtCost = !TLI->isExtFree(I);
5475     // Promote.
5476     Value *PromotedVal = TPH(I, TPT, PromotedInsts, NewCreatedInstsCost,
5477                              &NewExts, nullptr, *TLI);
5478     assert(PromotedVal &&
5479            "TypePromotionHelper should have filtered out those cases");
5480 
5481     // Only one extension can be merged into a load.
5482     // Therefore, if we have more than 1 new extension we heuristically
5483     // cut this search path, because it means we degrade the code quality.
5484     // With exactly 2, the transformation is neutral, because we will merge
5485     // one extension but leave one. However, we optimistically keep going,
5486     // because the new extension may be removed too.
5487     long long TotalCreatedInstsCost = CreatedInstsCost + NewCreatedInstsCost;
5488     // FIXME: It would be possible to propagate a negative value instead of
5489     // conservatively ceiling it to 0.
5490     TotalCreatedInstsCost =
5491         std::max((long long)0, (TotalCreatedInstsCost - ExtCost));
5492     if (!StressExtLdPromotion &&
5493         (TotalCreatedInstsCost > 1 ||
5494          !isPromotedInstructionLegal(*TLI, *DL, PromotedVal))) {
5495       // This promotion is not profitable; roll back to the previous state, and
5496       // save the current extension in ProfitablyMovedExts as the latest
5497       // speculative promotion turned out to be unprofitable.
5498       TPT.rollback(LastKnownGood);
5499       ProfitablyMovedExts.push_back(I);
5500       continue;
5501     }
5502     // Continue promoting NewExts as far as doing so is profitable.
5503     SmallVector<Instruction *, 2> NewlyMovedExts;
5504     (void)tryToPromoteExts(TPT, NewExts, NewlyMovedExts, TotalCreatedInstsCost);
5505     bool NewPromoted = false;
5506     for (auto *ExtInst : NewlyMovedExts) {
5507       Instruction *MovedExt = cast<Instruction>(ExtInst);
5508       Value *ExtOperand = MovedExt->getOperand(0);
5509       // If we have reached a load, we need this extra profitability check
5510       // as it could potentially be merged into an ext(load).
5511       if (isa<LoadInst>(ExtOperand) &&
5512           !(StressExtLdPromotion || NewCreatedInstsCost <= ExtCost ||
5513             (ExtOperand->hasOneUse() || hasSameExtUse(ExtOperand, *TLI))))
5514         continue;
5515 
5516       ProfitablyMovedExts.push_back(MovedExt);
5517       NewPromoted = true;
5518     }
5519 
5520     // If none of the speculative promotions for NewExts is profitable, roll
5521     // back and save the current extension (I) as the last profitable extension.
5522     if (!NewPromoted) {
5523       TPT.rollback(LastKnownGood);
5524       ProfitablyMovedExts.push_back(I);
5525       continue;
5526     }
5527     // The promotion is profitable.
5528     Promoted = true;
5529   }
5530   return Promoted;
5531 }
5532 
5533 /// Merge redundant sexts when one dominates the other.
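/// For illustration only (hypothetical IR): if two equivalent extensions
///   %s1 = sext i32 %x to i64   ; in a block that dominates the other
///   %s2 = sext i32 %x to i64
/// both survive the earlier transforms, all uses of %s2 are rewritten to use
/// %s1 and %s2 is removed.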
5534 bool CodeGenPrepare::mergeSExts(Function &F) {
5535   bool Changed = false;
5536   for (auto &Entry : ValToSExtendedUses) {
5537     SExts &Insts = Entry.second;
5538     SExts CurPts;
5539     for (Instruction *Inst : Insts) {
5540       if (RemovedInsts.count(Inst) || !isa<SExtInst>(Inst) ||
5541           Inst->getOperand(0) != Entry.first)
5542         continue;
5543       bool inserted = false;
5544       for (auto &Pt : CurPts) {
5545         if (getDT(F).dominates(Inst, Pt)) {
5546           Pt->replaceAllUsesWith(Inst);
5547           RemovedInsts.insert(Pt);
5548           Pt->removeFromParent();
5549           Pt = Inst;
5550           inserted = true;
5551           Changed = true;
5552           break;
5553         }
5554         if (!getDT(F).dominates(Pt, Inst))
5555           // Give up if we would need to merge in a common dominator, as
5556           // experiments show it is not profitable.
5557           continue;
5558         Inst->replaceAllUsesWith(Pt);
5559         RemovedInsts.insert(Inst);
5560         Inst->removeFromParent();
5561         inserted = true;
5562         Changed = true;
5563         break;
5564       }
5565       if (!inserted)
5566         CurPts.push_back(Inst);
5567     }
5568   }
5569   return Changed;
5570 }
5571 
5572 // Split large data structures so that the GEPs accessing them can have
5573 // smaller offsets and therefore can be sunk to the same blocks as their users.
5574 // For example, a large struct starting at %base is split into two parts
5575 // where the second part starts at %new_base.
5576 //
5577 // Before:
5578 // BB0:
5579 //   %base     =
5580 //
5581 // BB1:
5582 //   %gep0     = gep %base, off0
5583 //   %gep1     = gep %base, off1
5584 //   %gep2     = gep %base, off2
5585 //
5586 // BB2:
5587 //   %load1    = load %gep0
5588 //   %load2    = load %gep1
5589 //   %load3    = load %gep2
5590 //
5591 // After:
5592 // BB0:
5593 //   %base     =
5594 //   %new_base = gep %base, off0
5595 //
5596 // BB1:
5597 //   %new_gep0 = %new_base
5598 //   %new_gep1 = gep %new_base, off1 - off0
5599 //   %new_gep2 = gep %new_base, off2 - off0
5600 //
5601 // BB2:
5602 //   %load1    = load i32, i32* %new_gep0
5603 //   %load2    = load i32, i32* %new_gep1
5604 //   %load3    = load i32, i32* %new_gep2
5605 //
5606 // After the split, %new_gep1 and %new_gep2 can be sunk to BB2 because
5607 // their offsets are small enough to fit into the addressing mode.
5608 bool CodeGenPrepare::splitLargeGEPOffsets() {
5609   bool Changed = false;
5610   for (auto &Entry : LargeOffsetGEPMap) {
5611     Value *OldBase = Entry.first;
5612     SmallVectorImpl<std::pair<AssertingVH<GetElementPtrInst>, int64_t>>
5613         &LargeOffsetGEPs = Entry.second;
5614     auto compareGEPOffset =
5615         [&](const std::pair<GetElementPtrInst *, int64_t> &LHS,
5616             const std::pair<GetElementPtrInst *, int64_t> &RHS) {
5617           if (LHS.first == RHS.first)
5618             return false;
5619           if (LHS.second != RHS.second)
5620             return LHS.second < RHS.second;
5621           return LargeOffsetGEPID[LHS.first] < LargeOffsetGEPID[RHS.first];
5622         };
5623     // Sort all the GEPs of the same data structure based on their offsets.
5624     llvm::sort(LargeOffsetGEPs, compareGEPOffset);
5625     LargeOffsetGEPs.erase(
5626         std::unique(LargeOffsetGEPs.begin(), LargeOffsetGEPs.end()),
5627         LargeOffsetGEPs.end());
5628     // Skip if all the GEPs have the same offsets.
5629     if (LargeOffsetGEPs.front().second == LargeOffsetGEPs.back().second)
5630       continue;
5631     GetElementPtrInst *BaseGEP = LargeOffsetGEPs.begin()->first;
5632     int64_t BaseOffset = LargeOffsetGEPs.begin()->second;
5633     Value *NewBaseGEP = nullptr;
5634 
5635     auto *LargeOffsetGEP = LargeOffsetGEPs.begin();
5636     while (LargeOffsetGEP != LargeOffsetGEPs.end()) {
5637       GetElementPtrInst *GEP = LargeOffsetGEP->first;
5638       int64_t Offset = LargeOffsetGEP->second;
5639       if (Offset != BaseOffset) {
5640         TargetLowering::AddrMode AddrMode;
5641         AddrMode.BaseOffs = Offset - BaseOffset;
5642         // The result type of the GEP might not be the type of the memory
5643         // access.
5644         if (!TLI->isLegalAddressingMode(*DL, AddrMode,
5645                                         GEP->getResultElementType(),
5646                                         GEP->getAddressSpace())) {
5647           // We need to create a new base if the offset to the current base is
5648           // too large to fit into the addressing mode. So, a very large struct
5649           // may be split into several parts.
5650           BaseGEP = GEP;
5651           BaseOffset = Offset;
5652           NewBaseGEP = nullptr;
5653         }
5654       }
5655 
5656       // Generate a new GEP to replace the current one.
5657       LLVMContext &Ctx = GEP->getContext();
5658       Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
5659       Type *I8PtrTy =
5660           Type::getInt8PtrTy(Ctx, GEP->getType()->getPointerAddressSpace());
5661       Type *I8Ty = Type::getInt8Ty(Ctx);
5662 
5663       if (!NewBaseGEP) {
5664         // Create a new base if we don't have one yet.  Find the insertion
5665         // point for the new base first.
5666         BasicBlock::iterator NewBaseInsertPt;
5667         BasicBlock *NewBaseInsertBB;
5668         if (auto *BaseI = dyn_cast<Instruction>(OldBase)) {
5669           // If the base of the struct is an instruction, the new base will be
5670           // inserted close to it.
5671           NewBaseInsertBB = BaseI->getParent();
5672           if (isa<PHINode>(BaseI))
5673             NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
5674           else if (InvokeInst *Invoke = dyn_cast<InvokeInst>(BaseI)) {
5675             NewBaseInsertBB =
5676                 SplitEdge(NewBaseInsertBB, Invoke->getNormalDest());
5677             NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
5678           } else
5679             NewBaseInsertPt = std::next(BaseI->getIterator());
5680         } else {
5681           // If the current base is an argument or global value, the new base
5682           // will be inserted to the entry block.
5683           NewBaseInsertBB = &BaseGEP->getFunction()->getEntryBlock();
5684           NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
5685         }
5686         IRBuilder<> NewBaseBuilder(NewBaseInsertBB, NewBaseInsertPt);
5687         // Create a new base.
5688         Value *BaseIndex = ConstantInt::get(IntPtrTy, BaseOffset);
5689         NewBaseGEP = OldBase;
5690         if (NewBaseGEP->getType() != I8PtrTy)
5691           NewBaseGEP = NewBaseBuilder.CreatePointerCast(NewBaseGEP, I8PtrTy);
5692         NewBaseGEP =
5693             NewBaseBuilder.CreateGEP(I8Ty, NewBaseGEP, BaseIndex, "splitgep");
5694         NewGEPBases.insert(NewBaseGEP);
5695       }
5696 
5697       IRBuilder<> Builder(GEP);
5698       Value *NewGEP = NewBaseGEP;
5699       if (Offset == BaseOffset) {
5700         if (GEP->getType() != I8PtrTy)
5701           NewGEP = Builder.CreatePointerCast(NewGEP, GEP->getType());
5702       } else {
5703         // Calculate the new offset for the new GEP.
5704         Value *Index = ConstantInt::get(IntPtrTy, Offset - BaseOffset);
5705         NewGEP = Builder.CreateGEP(I8Ty, NewBaseGEP, Index);
5706 
5707         if (GEP->getType() != I8PtrTy)
5708           NewGEP = Builder.CreatePointerCast(NewGEP, GEP->getType());
5709       }
5710       GEP->replaceAllUsesWith(NewGEP);
5711       LargeOffsetGEPID.erase(GEP);
5712       LargeOffsetGEP = LargeOffsetGEPs.erase(LargeOffsetGEP);
5713       GEP->eraseFromParent();
5714       Changed = true;
5715     }
5716   }
5717   return Changed;
5718 }
5719 
5720 /// Return true, if an ext(load) can be formed from an extension in
5721 /// \p MovedExts.
5722 bool CodeGenPrepare::canFormExtLd(
5723     const SmallVectorImpl<Instruction *> &MovedExts, LoadInst *&LI,
5724     Instruction *&Inst, bool HasPromoted) {
5725   for (auto *MovedExtInst : MovedExts) {
5726     if (isa<LoadInst>(MovedExtInst->getOperand(0))) {
5727       LI = cast<LoadInst>(MovedExtInst->getOperand(0));
5728       Inst = MovedExtInst;
5729       break;
5730     }
5731   }
5732   if (!LI)
5733     return false;
5734 
5735   // If they're already in the same block, there's nothing to do.
5736   // Make the cheap checks first if we did not promote.
5737   // If we promoted, we need to check if it is indeed profitable.
5738   if (!HasPromoted && LI->getParent() == Inst->getParent())
5739     return false;
5740 
5741   return TLI->isExtLoad(LI, Inst, *DL);
5742 }
5743 
5744 /// Move a zext or sext fed by a load into the same basic block as the load,
5745 /// unless conditions are unfavorable. This allows SelectionDAG to fold the
5746 /// extend into the load.
5747 ///
5748 /// E.g.,
5749 /// \code
5750 /// %ld = load i32* %addr
5751 /// %add = add nuw i32 %ld, 4
5752 /// %zext = zext i32 %add to i64
5753 /// \endcode
5754 /// =>
5755 /// \code
5756 /// %ld = load i32* %addr
5757 /// %zext = zext i32 %ld to i64
5758 /// %add = add nuw i64 %zext, 4
5759 /// \endcode
5760 /// Note that the promotion of %add to i64 is done in tryToPromoteExts(), which
5761 /// allows us to match zext(load i32*) to i64.
5762 ///
5763 /// Also, try to promote the computations used to obtain a sign-extended
5764 /// value used in memory accesses.
5765 /// E.g.,
5766 /// \code
5767 /// a = add nsw i32 b, 3
5768 /// d = sext i32 a to i64
5769 /// e = getelementptr ..., i64 d
5770 /// \endcode
5771 /// =>
5772 /// \code
5773 /// f = sext i32 b to i64
5774 /// a = add nsw i64 f, 3
5775 /// e = getelementptr ..., i64 a
5776 /// \endcode
5777 ///
5778 /// \p Inst[in/out] the extension may be modified during the process if some
5779 /// promotions apply.
5780 bool CodeGenPrepare::optimizeExt(Instruction *&Inst) {
5781   bool AllowPromotionWithoutCommonHeader = false;
5782   /// See if it is an interesting sext operation for address type
5783   /// promotion before trying to promote it, e.g., one with the right
5784   /// type that is used in memory accesses.
5785   bool ATPConsiderable = TTI->shouldConsiderAddressTypePromotion(
5786       *Inst, AllowPromotionWithoutCommonHeader);
5787   TypePromotionTransaction TPT(RemovedInsts);
5788   TypePromotionTransaction::ConstRestorationPt LastKnownGood =
5789       TPT.getRestorationPoint();
5790   SmallVector<Instruction *, 1> Exts;
5791   SmallVector<Instruction *, 2> SpeculativelyMovedExts;
5792   Exts.push_back(Inst);
5793 
5794   bool HasPromoted = tryToPromoteExts(TPT, Exts, SpeculativelyMovedExts);
5795 
5796   // Look for a load being extended.
5797   LoadInst *LI = nullptr;
5798   Instruction *ExtFedByLoad;
5799 
5800   // Try to promote a chain of computation if it allows us to form an
5801   // extended load.
5802   if (canFormExtLd(SpeculativelyMovedExts, LI, ExtFedByLoad, HasPromoted)) {
5803     assert(LI && ExtFedByLoad && "Expect a valid load and extension");
5804     TPT.commit();
5805     // Move the extend into the same block as the load.
5806     ExtFedByLoad->moveAfter(LI);
5807     ++NumExtsMoved;
5808     Inst = ExtFedByLoad;
5809     return true;
5810   }
5811 
5812   // Continue promoting SExts if the target considers this profitable.
5813   if (ATPConsiderable &&
5814       performAddressTypePromotion(Inst, AllowPromotionWithoutCommonHeader,
5815                                   HasPromoted, TPT, SpeculativelyMovedExts))
5816     return true;
5817 
5818   TPT.rollback(LastKnownGood);
5819   return false;
5820 }
5821 
5822 // Perform address type promotion if doing so is profitable.
5823 // If AllowPromotionWithoutCommonHeader == false, we should find other sext
5824 // instructions that sign extend the same initial value. However, if
5825 // AllowPromotionWithoutCommonHeader == true, we expect promoting the
5826 // extension to be profitable on its own.
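//
// As a rough illustration (hypothetical IR, not from a test):
//   %a1 = add nsw i32 %x, 1
//   %d1 = sext i32 %a1 to i64
//   %a2 = add nsw i32 %x, 2
//   %d2 = sext i32 %a2 to i64
// After speculative promotion both extensions sit directly on %x, so %x is
// their common header. The first chain seen is recorded and deferred; it is
// promoted for real only once a second chain with the same header is visited
// (or immediately, if the target allows promotion without a common header).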
5827 bool CodeGenPrepare::performAddressTypePromotion(
5828     Instruction *&Inst, bool AllowPromotionWithoutCommonHeader,
5829     bool HasPromoted, TypePromotionTransaction &TPT,
5830     SmallVectorImpl<Instruction *> &SpeculativelyMovedExts) {
5831   bool Promoted = false;
5832   SmallPtrSet<Instruction *, 1> UnhandledExts;
5833   bool AllSeenFirst = true;
5834   for (auto *I : SpeculativelyMovedExts) {
5835     Value *HeadOfChain = I->getOperand(0);
5836     DenseMap<Value *, Instruction *>::iterator AlreadySeen =
5837         SeenChainsForSExt.find(HeadOfChain);
5838     // If there is an unhandled SExt which has the same header, try to promote
5839     // it as well.
5840     if (AlreadySeen != SeenChainsForSExt.end()) {
5841       if (AlreadySeen->second != nullptr)
5842         UnhandledExts.insert(AlreadySeen->second);
5843       AllSeenFirst = false;
5844     }
5845   }
5846 
5847   if (!AllSeenFirst || (AllowPromotionWithoutCommonHeader &&
5848                         SpeculativelyMovedExts.size() == 1)) {
5849     TPT.commit();
5850     if (HasPromoted)
5851       Promoted = true;
5852     for (auto *I : SpeculativelyMovedExts) {
5853       Value *HeadOfChain = I->getOperand(0);
5854       SeenChainsForSExt[HeadOfChain] = nullptr;
5855       ValToSExtendedUses[HeadOfChain].push_back(I);
5856     }
5857     // Update Inst as promotion happened.
5858     Inst = SpeculativelyMovedExts.pop_back_val();
5859   } else {
5860     // This is the first chain visited from the header; keep the current chain
5861     // as unhandled. Defer promoting it until we encounter another SExt
5862     // chain derived from the same header.
5863     for (auto *I : SpeculativelyMovedExts) {
5864       Value *HeadOfChain = I->getOperand(0);
5865       SeenChainsForSExt[HeadOfChain] = Inst;
5866     }
5867     return false;
5868   }
5869 
5870   if (!AllSeenFirst && !UnhandledExts.empty())
5871     for (auto *VisitedSExt : UnhandledExts) {
5872       if (RemovedInsts.count(VisitedSExt))
5873         continue;
5874       TypePromotionTransaction TPT(RemovedInsts);
5875       SmallVector<Instruction *, 1> Exts;
5876       SmallVector<Instruction *, 2> Chains;
5877       Exts.push_back(VisitedSExt);
5878       bool HasPromoted = tryToPromoteExts(TPT, Exts, Chains);
5879       TPT.commit();
5880       if (HasPromoted)
5881         Promoted = true;
5882       for (auto *I : Chains) {
5883         Value *HeadOfChain = I->getOperand(0);
5884         // Mark this as handled.
5885         SeenChainsForSExt[HeadOfChain] = nullptr;
5886         ValToSExtendedUses[HeadOfChain].push_back(I);
5887       }
5888     }
5889   return Promoted;
5890 }
5891 
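// A rough sketch of the transform performed below (illustrative IR only):
//   bb0:
//     %src = ...
//     %ext = zext i16 %src to i32
//   bb1:
//     use of i16 %src
// If both %src and %ext are live out of bb0 and truncation is free, the use
// in bb1 is rewritten as
//   bb1:
//     %t = trunc i32 %ext to i16
//     use of i16 %t
// so that only %ext needs to stay live across the block boundary.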
5892 bool CodeGenPrepare::optimizeExtUses(Instruction *I) {
5893   BasicBlock *DefBB = I->getParent();
5894 
5895   // If the result of a {s|z}ext and its source are both live out, rewrite all
5896   // other uses of the source with the result of the extension.
5897   Value *Src = I->getOperand(0);
5898   if (Src->hasOneUse())
5899     return false;
5900 
5901   // Only do this xform if truncating is free.
5902   if (!TLI->isTruncateFree(I->getType(), Src->getType()))
5903     return false;
5904 
5905   // Only safe to perform the optimization if the source is also defined in
5906   // this block.
5907   if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent())
5908     return false;
5909 
5910   bool DefIsLiveOut = false;
5911   for (User *U : I->users()) {
5912     Instruction *UI = cast<Instruction>(U);
5913 
5914     // Figure out which BB this ext is used in.
5915     BasicBlock *UserBB = UI->getParent();
5916     if (UserBB == DefBB) continue;
5917     DefIsLiveOut = true;
5918     break;
5919   }
5920   if (!DefIsLiveOut)
5921     return false;
5922 
5923   // Make sure none of the uses are PHI nodes.
5924   for (User *U : Src->users()) {
5925     Instruction *UI = cast<Instruction>(U);
5926     BasicBlock *UserBB = UI->getParent();
5927     if (UserBB == DefBB) continue;
5928     // Be conservative. We don't want this xform to end up introducing
5929     // reloads just before load / store instructions.
5930     if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI))
5931       return false;
5932   }
5933 
5934   // InsertedTruncs - Insert at most one trunc in each block.
5935   DenseMap<BasicBlock*, Instruction*> InsertedTruncs;
5936 
5937   bool MadeChange = false;
5938   for (Use &U : Src->uses()) {
5939     Instruction *User = cast<Instruction>(U.getUser());
5940 
5941     // Figure out which BB this ext is used in.
5942     BasicBlock *UserBB = User->getParent();
5943     if (UserBB == DefBB) continue;
5944 
5945     // Both src and def are live in this block. Rewrite the use.
5946     Instruction *&InsertedTrunc = InsertedTruncs[UserBB];
5947 
5948     if (!InsertedTrunc) {
5949       BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
5950       assert(InsertPt != UserBB->end());
5951       InsertedTrunc = new TruncInst(I, Src->getType(), "", &*InsertPt);
5952       InsertedInsts.insert(InsertedTrunc);
5953     }
5954 
5955     // Replace a use of the {s|z}ext source with a use of the result.
5956     U = InsertedTrunc;
5957     ++NumExtUses;
5958     MadeChange = true;
5959   }
5960 
5961   return MadeChange;
5962 }
5963 
5964 // Find loads whose uses only use some of the loaded value's bits.  Add an "and"
5965 // just after the load if the target can fold this into one extload instruction,
5966 // with the hope of eliminating some of the other later "and" instructions using
5967 // the loaded value.  "and"s that are made trivially redundant by the insertion
5968 // of the new "and" are removed by this function, while others (e.g. those whose
5969 // path from the load goes through a phi) are left for isel to potentially
5970 // remove.
5971 //
5972 // For example:
5973 //
5974 // b0:
5975 //   x = load i32
5976 //   ...
5977 // b1:
5978 //   y = and x, 0xff
5979 //   z = use y
5980 //
5981 // becomes:
5982 //
5983 // b0:
5984 //   x = load i32
5985 //   x' = and x, 0xff
5986 //   ...
5987 // b1:
5988 //   z = use x'
5989 //
5990 // whereas:
5991 //
5992 // b0:
5993 //   x1 = load i32
5994 //   ...
5995 // b1:
5996 //   x2 = load i32
5997 //   ...
5998 // b2:
5999 //   x = phi x1, x2
6000 //   y = and x, 0xff
6001 //
6002 // becomes (after a call to optimizeLoadExt for each load):
6003 //
6004 // b0:
6005 //   x1 = load i32
6006 //   x1' = and x1, 0xff
6007 //   ...
6008 // b1:
6009 //   x2 = load i32
6010 //   x2' = and x2, 0xff
6011 //   ...
6012 // b2:
6013 //   x = phi x1', x2'
6014 //   y = and x, 0xff
6015 bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) {
6016   if (!Load->isSimple() || !Load->getType()->isIntOrPtrTy())
6017     return false;
6018 
6019   // Skip loads we've already transformed.
6020   if (Load->hasOneUse() &&
6021       InsertedInsts.count(cast<Instruction>(*Load->user_begin())))
6022     return false;
6023 
6024   // Look at all uses of Load, looking through phis, to determine how many bits
6025   // of the loaded value are needed.
6026   SmallVector<Instruction *, 8> WorkList;
6027   SmallPtrSet<Instruction *, 16> Visited;
6028   SmallVector<Instruction *, 8> AndsToMaybeRemove;
6029   for (auto *U : Load->users())
6030     WorkList.push_back(cast<Instruction>(U));
6031 
6032   EVT LoadResultVT = TLI->getValueType(*DL, Load->getType());
6033   unsigned BitWidth = LoadResultVT.getSizeInBits();
6034   APInt DemandBits(BitWidth, 0);
6035   APInt WidestAndBits(BitWidth, 0);
6036 
6037   while (!WorkList.empty()) {
6038     Instruction *I = WorkList.back();
6039     WorkList.pop_back();
6040 
6041     // Break use-def graph loops.
6042     if (!Visited.insert(I).second)
6043       continue;
6044 
6045     // For a PHI node, push all of its users.
6046     if (auto *Phi = dyn_cast<PHINode>(I)) {
6047       for (auto *U : Phi->users())
6048         WorkList.push_back(cast<Instruction>(U));
6049       continue;
6050     }
6051 
6052     switch (I->getOpcode()) {
6053     case Instruction::And: {
6054       auto *AndC = dyn_cast<ConstantInt>(I->getOperand(1));
6055       if (!AndC)
6056         return false;
6057       APInt AndBits = AndC->getValue();
6058       DemandBits |= AndBits;
6059       // Keep track of the widest 'and' mask we see.
6060       if (AndBits.ugt(WidestAndBits))
6061         WidestAndBits = AndBits;
6062       if (AndBits == WidestAndBits && I->getOperand(0) == Load)
6063         AndsToMaybeRemove.push_back(I);
6064       break;
6065     }
6066 
6067     case Instruction::Shl: {
6068       auto *ShlC = dyn_cast<ConstantInt>(I->getOperand(1));
6069       if (!ShlC)
6070         return false;
6071       uint64_t ShiftAmt = ShlC->getLimitedValue(BitWidth - 1);
6072       DemandBits.setLowBits(BitWidth - ShiftAmt);
6073       break;
6074     }
6075 
6076     case Instruction::Trunc: {
6077       EVT TruncVT = TLI->getValueType(*DL, I->getType());
6078       unsigned TruncBitWidth = TruncVT.getSizeInBits();
6079       DemandBits.setLowBits(TruncBitWidth);
6080       break;
6081     }
6082 
6083     default:
6084       return false;
6085     }
6086   }
6087 
6088   uint32_t ActiveBits = DemandBits.getActiveBits();
6089   // Avoid hoisting (and (load x) 1) since it is unlikely to be folded by the
6090   // target even if isLoadExtLegal says an i1 EXTLOAD is valid.  For example,
6091   // for the AArch64 target isLoadExtLegal(ZEXTLOAD, i32, i1) returns true, but
6092   // (and (load x) 1) is not matched as a single instruction, rather as a LDR
6093   // followed by an AND.
6094   // TODO: Look into removing this restriction by fixing backends to either
6095   // return false for isLoadExtLegal for i1 or have them select this pattern to
6096   // a single instruction.
6097   //
6098   // Also avoid hoisting if we didn't see any ands with the exact DemandBits
6099   // mask, since these are the only ands that will be removed by isel.
6100   if (ActiveBits <= 1 || !DemandBits.isMask(ActiveBits) ||
6101       WidestAndBits != DemandBits)
6102     return false;
6103 
6104   LLVMContext &Ctx = Load->getType()->getContext();
6105   Type *TruncTy = Type::getIntNTy(Ctx, ActiveBits);
6106   EVT TruncVT = TLI->getValueType(*DL, TruncTy);
6107 
6108   // Reject cases that won't be matched as extloads.
6109   if (!LoadResultVT.bitsGT(TruncVT) || !TruncVT.isRound() ||
6110       !TLI->isLoadExtLegal(ISD::ZEXTLOAD, LoadResultVT, TruncVT))
6111     return false;
6112 
6113   IRBuilder<> Builder(Load->getNextNode());
6114   auto *NewAnd = cast<Instruction>(
6115       Builder.CreateAnd(Load, ConstantInt::get(Ctx, DemandBits)));
6116   // Mark this instruction as "inserted by CGP", so that other
6117   // optimizations don't touch it.
6118   InsertedInsts.insert(NewAnd);
6119 
6120   // Replace all uses of load with new and (except for the use of load in the
6121   // new and itself).
6122   Load->replaceAllUsesWith(NewAnd);
6123   NewAnd->setOperand(0, Load);
6124 
6125   // Remove any and instructions that are now redundant.
6126   for (auto *And : AndsToMaybeRemove)
6127     // Check that the and mask is the same as the one we decided to put on the
6128     // new and.
6129     if (cast<ConstantInt>(And->getOperand(1))->getValue() == DemandBits) {
6130       And->replaceAllUsesWith(NewAnd);
6131       if (&*CurInstIterator == And)
6132         CurInstIterator = std::next(And->getIterator());
6133       And->eraseFromParent();
6134       ++NumAndUses;
6135     }
6136 
6137   ++NumAndsAdded;
6138   return true;
6139 }
6140 
6141 /// Check if V (an operand of a select instruction) is an expensive instruction
6142 /// that is only used once.
6143 static bool sinkSelectOperand(const TargetTransformInfo *TTI, Value *V) {
6144   auto *I = dyn_cast<Instruction>(V);
6145   // If it's safe to speculatively execute, then it should not have side
6146   // effects; therefore, it's safe to sink and possibly *not* execute.
6147   return I && I->hasOneUse() && isSafeToSpeculativelyExecute(I) &&
6148          TTI->getUserCost(I, TargetTransformInfo::TCK_SizeAndLatency) >=
6149          TargetTransformInfo::TCC_Expensive;
6150 }
6151 
6152 /// Returns true if a SelectInst should be turned into an explicit branch.
6153 static bool isFormingBranchFromSelectProfitable(const TargetTransformInfo *TTI,
6154                                                 const TargetLowering *TLI,
6155                                                 SelectInst *SI) {
6156   // If even a predictable select is cheap, then a branch can't be cheaper.
6157   if (!TLI->isPredictableSelectExpensive())
6158     return false;
6159 
6160   // FIXME: This should use the same heuristics as IfConversion to determine
6161   // whether a select is better represented as a branch.
6162 
6163   // If metadata tells us that the select condition is obviously predictable,
6164   // then we want to replace the select with a branch.
6165   uint64_t TrueWeight, FalseWeight;
6166   if (SI->extractProfMetadata(TrueWeight, FalseWeight)) {
6167     uint64_t Max = std::max(TrueWeight, FalseWeight);
6168     uint64_t Sum = TrueWeight + FalseWeight;
6169     if (Sum != 0) {
6170       auto Probability = BranchProbability::getBranchProbability(Max, Sum);
6171       if (Probability > TLI->getPredictableBranchThreshold())
6172         return true;
6173     }
6174   }
6175 
6176   CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
6177 
6178   // If a branch is predictable, an out-of-order CPU can avoid blocking on its
6179   // comparison condition. If the compare has more than one use, there's
6180   // probably another cmov or setcc around, so it's not worth emitting a branch.
6181   if (!Cmp || !Cmp->hasOneUse())
6182     return false;
6183 
6184   // If either operand of the select is expensive and only needed on one side
6185   // of the select, we should form a branch.
6186   if (sinkSelectOperand(TTI, SI->getTrueValue()) ||
6187       sinkSelectOperand(TTI, SI->getFalseValue()))
6188     return true;
6189 
6190   return false;
6191 }
6192 
6193 /// If \p isTrue is true, return the true value of \p SI, otherwise return
6194 /// false value of \p SI. If the true/false value of \p SI is defined by any
6195 /// select instructions in \p Selects, look through the defining select
6196 /// instruction until the true/false value is not defined in \p Selects.
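/// For example (illustrative only): given
///   %s1 = select i1 %c, i32 %a, i32 %b
///   %s2 = select i1 %c, i32 %s1, i32 %d
/// with \p Selects = {%s1, %s2}, asking for the true value of %s2 looks
/// through %s1 and returns %a, while the false value of %s2 is simply %d.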
6197 static Value *getTrueOrFalseValue(
6198     SelectInst *SI, bool isTrue,
6199     const SmallPtrSet<const Instruction *, 2> &Selects) {
6200   Value *V = nullptr;
6201 
6202   for (SelectInst *DefSI = SI; DefSI != nullptr && Selects.count(DefSI);
6203        DefSI = dyn_cast<SelectInst>(V)) {
6204     assert(DefSI->getCondition() == SI->getCondition() &&
6205            "The condition of DefSI does not match with SI");
6206     V = (isTrue ? DefSI->getTrueValue() : DefSI->getFalseValue());
6207   }
6208 
6209   assert(V && "Failed to get select true/false value");
6210   return V;
6211 }
6212 
6213 bool CodeGenPrepare::optimizeShiftInst(BinaryOperator *Shift) {
6214   assert(Shift->isShift() && "Expected a shift");
6215 
6216   // If this is (1) a vector shift, (2) shifts by scalars are cheaper than
6217   // general vector shifts, and (3) the shift amount is a select-of-splatted
6218   // values, hoist the shifts before the select:
6219   //   shift Op0, (select Cond, TVal, FVal) -->
6220   //   select Cond, (shift Op0, TVal), (shift Op0, FVal)
6221   //
6222   // This is inverting a generic IR transform when we know that the cost of a
6223   // general vector shift is more than the cost of 2 shift-by-scalars.
6224   // We can't do this effectively in SDAG because we may not be able to
6225   // determine if the select operands are splats from within a basic block.
6226   Type *Ty = Shift->getType();
6227   if (!Ty->isVectorTy() || !TLI->isVectorShiftByScalarCheap(Ty))
6228     return false;
6229   Value *Cond, *TVal, *FVal;
6230   if (!match(Shift->getOperand(1),
6231              m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
6232     return false;
6233   if (!isSplatValue(TVal) || !isSplatValue(FVal))
6234     return false;
6235 
6236   IRBuilder<> Builder(Shift);
6237   BinaryOperator::BinaryOps Opcode = Shift->getOpcode();
6238   Value *NewTVal = Builder.CreateBinOp(Opcode, Shift->getOperand(0), TVal);
6239   Value *NewFVal = Builder.CreateBinOp(Opcode, Shift->getOperand(0), FVal);
6240   Value *NewSel = Builder.CreateSelect(Cond, NewTVal, NewFVal);
6241   Shift->replaceAllUsesWith(NewSel);
6242   Shift->eraseFromParent();
6243   return true;
6244 }
6245 
6246 bool CodeGenPrepare::optimizeFunnelShift(IntrinsicInst *Fsh) {
6247   Intrinsic::ID Opcode = Fsh->getIntrinsicID();
6248   assert((Opcode == Intrinsic::fshl || Opcode == Intrinsic::fshr) &&
6249          "Expected a funnel shift");
6250 
6251   // If this is (1) a vector funnel shift, (2) shifts by scalars are cheaper
6252   // than general vector shifts, and (3) the shift amount is select-of-splatted
6253   // values, hoist the funnel shifts before the select:
6254   //   fsh Op0, Op1, (select Cond, TVal, FVal) -->
6255   //   select Cond, (fsh Op0, Op1, TVal), (fsh Op0, Op1, FVal)
6256   //
6257   // This is inverting a generic IR transform when we know that the cost of a
6258   // general vector shift is more than the cost of 2 shift-by-scalars.
6259   // We can't do this effectively in SDAG because we may not be able to
6260   // determine if the select operands are splats from within a basic block.
6261   Type *Ty = Fsh->getType();
6262   if (!Ty->isVectorTy() || !TLI->isVectorShiftByScalarCheap(Ty))
6263     return false;
6264   Value *Cond, *TVal, *FVal;
6265   if (!match(Fsh->getOperand(2),
6266              m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
6267     return false;
6268   if (!isSplatValue(TVal) || !isSplatValue(FVal))
6269     return false;
6270 
6271   IRBuilder<> Builder(Fsh);
6272   Value *X = Fsh->getOperand(0), *Y = Fsh->getOperand(1);
6273   Value *NewTVal = Builder.CreateIntrinsic(Opcode, Ty, { X, Y, TVal });
6274   Value *NewFVal = Builder.CreateIntrinsic(Opcode, Ty, { X, Y, FVal });
6275   Value *NewSel = Builder.CreateSelect(Cond, NewTVal, NewFVal);
6276   Fsh->replaceAllUsesWith(NewSel);
6277   Fsh->eraseFromParent();
6278   return true;
6279 }
6280 
6281 /// If we have a SelectInst that will likely profit from branch prediction,
6282 /// turn it into a branch.
6283 bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) {
6284   // If branch conversion isn't desirable, exit early.
6285   if (DisableSelectToBranch || OptSize ||
6286       llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI.get()))
6287     return false;
6288 
6289   // Find all consecutive select instructions that share the same condition.
6290   SmallVector<SelectInst *, 2> ASI;
6291   ASI.push_back(SI);
6292   for (BasicBlock::iterator It = ++BasicBlock::iterator(SI);
6293        It != SI->getParent()->end(); ++It) {
6294     SelectInst *I = dyn_cast<SelectInst>(&*It);
6295     if (I && SI->getCondition() == I->getCondition()) {
6296       ASI.push_back(I);
6297     } else {
6298       break;
6299     }
6300   }
6301 
6302   SelectInst *LastSI = ASI.back();
6303   // Increment the current iterator to skip the rest of the select instructions,
6304   // because either none or all of them will be lowered to branches.
6305   CurInstIterator = std::next(LastSI->getIterator());
6306 
6307   bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1);
6308 
6309   // Can we convert the 'select' to CF?
6310   if (VectorCond || SI->getMetadata(LLVMContext::MD_unpredictable))
6311     return false;
6312 
6313   TargetLowering::SelectSupportKind SelectKind;
6314   if (VectorCond)
6315     SelectKind = TargetLowering::VectorMaskSelect;
6316   else if (SI->getType()->isVectorTy())
6317     SelectKind = TargetLowering::ScalarCondVectorVal;
6318   else
6319     SelectKind = TargetLowering::ScalarValSelect;
6320 
6321   if (TLI->isSelectSupported(SelectKind) &&
6322       !isFormingBranchFromSelectProfitable(TTI, TLI, SI))
6323     return false;
6324 
6325   // The DominatorTree needs to be rebuilt by any consumers after this
6326   // transformation. We simply reset here rather than setting the ModifiedDT
6327   // flag to avoid restarting the function walk in runOnFunction for each
6328   // select optimized.
6329   DT.reset();
6330 
6331   // Transform a sequence like this:
6332   //    start:
6333   //       %cmp = cmp uge i32 %a, %b
6334   //       %sel = select i1 %cmp, i32 %c, i32 %d
6335   //
6336   // Into:
6337   //    start:
6338   //       %cmp = cmp uge i32 %a, %b
6339   //       %cmp.frozen = freeze %cmp
6340   //       br i1 %cmp.frozen, label %select.true, label %select.false
6341   //    select.true:
6342   //       br label %select.end
6343   //    select.false:
6344   //       br label %select.end
6345   //    select.end:
6346   //       %sel = phi i32 [ %c, %select.true ], [ %d, %select.false ]
6347   //
6348   // %cmp should be frozen, otherwise it may introduce undefined behavior.
6349   // In addition, we may sink instructions that produce %c or %d from
6350   // the entry block into the destination(s) of the new branch.
6351   // If the true or false blocks do not contain a sunken instruction, that
6352   // block and its branch may be optimized away. In that case, one side of the
6353   // first branch will point directly to select.end, and the corresponding PHI
6354   // predecessor block will be the start block.
6355 
6356   // First, we split the block containing the select into 2 blocks.
6357   BasicBlock *StartBlock = SI->getParent();
6358   BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(LastSI));
6359   BasicBlock *EndBlock = StartBlock->splitBasicBlock(SplitPt, "select.end");
6360   BFI->setBlockFreq(EndBlock, BFI->getBlockFreq(StartBlock).getFrequency());
6361 
6362   // Delete the unconditional branch that was just created by the split.
6363   StartBlock->getTerminator()->eraseFromParent();
6364 
6365   // These are the new basic blocks for the conditional branch.
6366   // At least one will become an actual new basic block.
6367   BasicBlock *TrueBlock = nullptr;
6368   BasicBlock *FalseBlock = nullptr;
6369   BranchInst *TrueBranch = nullptr;
6370   BranchInst *FalseBranch = nullptr;
6371 
6372   // Sink expensive instructions into the conditional blocks to avoid executing
6373   // them speculatively.
6374   for (SelectInst *SI : ASI) {
6375     if (sinkSelectOperand(TTI, SI->getTrueValue())) {
6376       if (TrueBlock == nullptr) {
6377         TrueBlock = BasicBlock::Create(SI->getContext(), "select.true.sink",
6378                                        EndBlock->getParent(), EndBlock);
6379         TrueBranch = BranchInst::Create(EndBlock, TrueBlock);
6380         TrueBranch->setDebugLoc(SI->getDebugLoc());
6381       }
6382       auto *TrueInst = cast<Instruction>(SI->getTrueValue());
6383       TrueInst->moveBefore(TrueBranch);
6384     }
6385     if (sinkSelectOperand(TTI, SI->getFalseValue())) {
6386       if (FalseBlock == nullptr) {
6387         FalseBlock = BasicBlock::Create(SI->getContext(), "select.false.sink",
6388                                         EndBlock->getParent(), EndBlock);
6389         FalseBranch = BranchInst::Create(EndBlock, FalseBlock);
6390         FalseBranch->setDebugLoc(SI->getDebugLoc());
6391       }
6392       auto *FalseInst = cast<Instruction>(SI->getFalseValue());
6393       FalseInst->moveBefore(FalseBranch);
6394     }
6395   }
6396 
6397   // If there was nothing to sink, then arbitrarily choose the 'false' side
6398   // for a new input value to the PHI.
6399   if (TrueBlock == FalseBlock) {
6400     assert(TrueBlock == nullptr &&
6401            "Unexpected basic block transform while optimizing select");
6402 
6403     FalseBlock = BasicBlock::Create(SI->getContext(), "select.false",
6404                                     EndBlock->getParent(), EndBlock);
6405     auto *FalseBranch = BranchInst::Create(EndBlock, FalseBlock);
6406     FalseBranch->setDebugLoc(SI->getDebugLoc());
6407   }
6408 
6409   // Insert the real conditional branch based on the original condition.
6410   // If we did not create a new block for one of the 'true' or 'false' paths
6411   // of the condition, it means that side of the branch goes to the end block
6412   // directly and the path originates from the start block from the point of
6413   // view of the new PHI.
6414   BasicBlock *TT, *FT;
6415   if (TrueBlock == nullptr) {
6416     TT = EndBlock;
6417     FT = FalseBlock;
6418     TrueBlock = StartBlock;
6419   } else if (FalseBlock == nullptr) {
6420     TT = TrueBlock;
6421     FT = EndBlock;
6422     FalseBlock = StartBlock;
6423   } else {
6424     TT = TrueBlock;
6425     FT = FalseBlock;
6426   }
6427   IRBuilder<> IB(SI);
6428   auto *CondFr = IB.CreateFreeze(SI->getCondition(), SI->getName() + ".frozen");
6429   IB.CreateCondBr(CondFr, TT, FT, SI);
6430 
6431   SmallPtrSet<const Instruction *, 2> INS;
6432   INS.insert(ASI.begin(), ASI.end());
6433   // Use a reverse iterator because a later select may use the value of an
6434   // earlier select, and we need to propagate the value through the earlier
6435   // select to get the PHI operand.
6436   for (auto It = ASI.rbegin(); It != ASI.rend(); ++It) {
6437     SelectInst *SI = *It;
6438     // The select itself is replaced with a PHI Node.
6439     PHINode *PN = PHINode::Create(SI->getType(), 2, "", &EndBlock->front());
6440     PN->takeName(SI);
6441     PN->addIncoming(getTrueOrFalseValue(SI, true, INS), TrueBlock);
6442     PN->addIncoming(getTrueOrFalseValue(SI, false, INS), FalseBlock);
6443     PN->setDebugLoc(SI->getDebugLoc());
6444 
6445     SI->replaceAllUsesWith(PN);
6446     SI->eraseFromParent();
6447     INS.erase(SI);
6448     ++NumSelectsExpanded;
6449   }
6450 
6451   // Instruct OptimizeBlock to skip to the next block.
6452   CurInstIterator = StartBlock->end();
6453   return true;
6454 }
6455 
6456 /// Some targets only accept certain types for splat inputs. For example, a VDUP
6457 /// in MVE takes a GPR (integer) register, and the instructions that incorporate
6458 /// a VDUP (such as a VADD qd, qm, rm) also require a GPR register.
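///
/// As a sketch of the rewrite performed below (hypothetical IR): a float splat
///   %ins   = insertelement <4 x float> undef, float %f, i32 0
///   %splat = shufflevector <4 x float> %ins, <4 x float> undef, <4 x i32> zeroinitializer
/// becomes, when the target requests i32 via shouldConvertSplatType,
///   %b    = bitcast float %f to i32
///   %ins2 = insertelement <4 x i32> undef, i32 %b, i64 0
///   %spl2 = shufflevector <4 x i32> %ins2, <4 x i32> undef, <4 x i32> zeroinitializer
///   %bc   = bitcast <4 x i32> %spl2 to <4 x float>
/// so the splatted scalar can live in an integer (GPR) register.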
6459 bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) {
6460   if (!match(SVI, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
6461                             m_Undef(), m_ZeroMask())))
6462     return false;
6463   Type *NewType = TLI->shouldConvertSplatType(SVI);
6464   if (!NewType)
6465     return false;
6466 
6467   VectorType *SVIVecType = cast<VectorType>(SVI->getType());
6468   assert(!NewType->isVectorTy() && "Expected a scalar type!");
6469   assert(NewType->getScalarSizeInBits() == SVIVecType->getScalarSizeInBits() &&
6470          "Expected a type of the same size!");
6471   auto *NewVecType =
6472       FixedVectorType::get(NewType, SVIVecType->getNumElements());
6473 
6474   // Create a bitcast (shuffle (insert (bitcast(..))))
6475   IRBuilder<> Builder(SVI->getContext());
6476   Builder.SetInsertPoint(SVI);
6477   Value *BC1 = Builder.CreateBitCast(
6478       cast<Instruction>(SVI->getOperand(0))->getOperand(1), NewType);
6479   Value *Insert = Builder.CreateInsertElement(UndefValue::get(NewVecType), BC1,
6480                                               (uint64_t)0);
6481   Value *Shuffle = Builder.CreateShuffleVector(
6482       Insert, UndefValue::get(NewVecType), SVI->getShuffleMask());
6483   Value *BC2 = Builder.CreateBitCast(Shuffle, SVIVecType);
6484 
6485   SVI->replaceAllUsesWith(BC2);
6486   RecursivelyDeleteTriviallyDeadInstructions(SVI);
6487 
6488   // Also hoist the bitcast up to its operand if they are not in the same
6489   // block.
6490   if (auto *BCI = dyn_cast<Instruction>(BC1))
6491     if (auto *Op = dyn_cast<Instruction>(BCI->getOperand(0)))
6492       if (BCI->getParent() != Op->getParent() && !isa<PHINode>(Op) &&
6493           !Op->isTerminator() && !Op->isEHPad())
6494         BCI->moveAfter(Op);
6495 
6496   return true;
6497 }
6498 
6499 bool CodeGenPrepare::tryToSinkFreeOperands(Instruction *I) {
6500   // If the operands of I can be folded into a target instruction together with
6501   // I, duplicate and sink them.
6502   SmallVector<Use *, 4> OpsToSink;
6503   if (!TLI->shouldSinkOperands(I, OpsToSink))
6504     return false;
6505 
6506   // OpsToSink can contain multiple uses in a use chain (e.g.
6507   // (%u1 with %u1 = shufflevector), (%u2 with %u2 = zext %u1)). The dominating
6508   // uses must come first, so we process the ops in reverse order so as to not
6509   // create invalid IR.
6510   BasicBlock *TargetBB = I->getParent();
6511   bool Changed = false;
6512   SmallVector<Use *, 4> ToReplace;
6513   for (Use *U : reverse(OpsToSink)) {
6514     auto *UI = cast<Instruction>(U->get());
6515     if (UI->getParent() == TargetBB || isa<PHINode>(UI))
6516       continue;
6517     ToReplace.push_back(U);
6518   }
6519 
6520   SetVector<Instruction *> MaybeDead;
6521   DenseMap<Instruction *, Instruction *> NewInstructions;
6522   Instruction *InsertPoint = I;
6523   for (Use *U : ToReplace) {
6524     auto *UI = cast<Instruction>(U->get());
6525     Instruction *NI = UI->clone();
6526     NewInstructions[UI] = NI;
6527     MaybeDead.insert(UI);
6528     LLVM_DEBUG(dbgs() << "Sinking " << *UI << " to user " << *I << "\n");
6529     NI->insertBefore(InsertPoint);
6530     InsertPoint = NI;
6531     InsertedInsts.insert(NI);
6532 
6533     // Update the use for the new instruction, making sure that we update the
6534     // sunk instruction uses, if it is part of a chain that has already been
6535     // sunk.
6536     Instruction *OldI = cast<Instruction>(U->getUser());
6537     if (NewInstructions.count(OldI))
6538       NewInstructions[OldI]->setOperand(U->getOperandNo(), NI);
6539     else
6540       U->set(NI);
6541     Changed = true;
6542   }
6543 
6544   // Remove instructions that are dead after sinking.
6545   for (auto *I : MaybeDead) {
6546     if (!I->hasNUsesOrMore(1)) {
6547       LLVM_DEBUG(dbgs() << "Removing dead instruction: " << *I << "\n");
6548       I->eraseFromParent();
6549     }
6550   }
6551 
6552   return Changed;
6553 }
6554 
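// A rough sketch of the switch-widening transform below (illustrative IR
// only), assuming a target whose natural register width is 32 bits:
//   switch i8 %c, label %def [ i8 1, label %bb1
//                              i8 2, label %bb2 ]
// becomes
//   %w = zext i8 %c to i32
//   switch i32 %w, label %def [ i32 1, label %bb1
//                               i32 2, label %bb2 ]
// (sext instead of zext if %c is an argument already marked signext), so the
// per-case comparisons no longer each need to re-extend the condition.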
6555 bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) {
6556   Value *Cond = SI->getCondition();
6557   Type *OldType = Cond->getType();
6558   LLVMContext &Context = Cond->getContext();
6559   MVT RegType = TLI->getRegisterType(Context, TLI->getValueType(*DL, OldType));
6560   unsigned RegWidth = RegType.getSizeInBits();
6561 
6562   if (RegWidth <= cast<IntegerType>(OldType)->getBitWidth())
6563     return false;
6564 
6565   // If the register width is greater than the type width, expand the condition
6566   // of the switch instruction and each case constant to the width of the
6567   // register. By widening the type of the switch condition, subsequent
6568   // comparisons (for case comparisons) will not need to be extended to the
6569   // preferred register width, so we will potentially eliminate N-1 extends,
6570   // where N is the number of cases in the switch.
6571   auto *NewType = Type::getIntNTy(Context, RegWidth);
6572 
6573   // Zero-extend the switch condition and case constants unless the switch
6574   // condition is a function argument that is already being sign-extended.
6575   // In that case, we can avoid an unnecessary mask/extension by sign-extending
6576   // everything instead.
6577   Instruction::CastOps ExtType = Instruction::ZExt;
6578   if (auto *Arg = dyn_cast<Argument>(Cond))
6579     if (Arg->hasSExtAttr())
6580       ExtType = Instruction::SExt;
6581 
6582   auto *ExtInst = CastInst::Create(ExtType, Cond, NewType);
6583   ExtInst->insertBefore(SI);
6584   ExtInst->setDebugLoc(SI->getDebugLoc());
6585   SI->setCondition(ExtInst);
6586   for (auto Case : SI->cases()) {
6587     APInt NarrowConst = Case.getCaseValue()->getValue();
6588     APInt WideConst = (ExtType == Instruction::ZExt) ?
6589                       NarrowConst.zext(RegWidth) : NarrowConst.sext(RegWidth);
6590     Case.setValue(ConstantInt::get(Context, WideConst));
6591   }
6592 
6593   return true;
6594 }
6595 
6596 
6597 namespace {
6598 
6599 /// Helper class to promote a scalar operation to a vector one.
6600 /// This class is used to move an extractelement transition downward.
6601 /// E.g.,
6602 /// a = vector_op <2 x i32>
6603 /// b = extractelement <2 x i32> a, i32 0
6604 /// c = scalar_op b
6605 /// store c
6606 ///
6607 /// =>
6608 /// a = vector_op <2 x i32>
6609 /// c = vector_op a (equivalent to scalar_op on the related lane)
6610 /// * d = extractelement <2 x i32> c, i32 0
6611 /// * store d
6612 /// Assuming both the extractelement and the store can be combined, we get rid
6613 /// of the transition.
6614 class VectorPromoteHelper {
6615   /// DataLayout associated with the current module.
6616   const DataLayout &DL;
6617 
6618   /// Used to perform some checks on the legality of vector operations.
6619   const TargetLowering &TLI;
6620 
  /// Used to estimate the cost of the promoted chain.
6622   const TargetTransformInfo &TTI;
6623 
6624   /// The transition being moved downwards.
6625   Instruction *Transition;
6626 
6627   /// The sequence of instructions to be promoted.
6628   SmallVector<Instruction *, 4> InstsToBePromoted;
6629 
6630   /// Cost of combining a store and an extract.
6631   unsigned StoreExtractCombineCost;
6632 
6633   /// Instruction that will be combined with the transition.
6634   Instruction *CombineInst = nullptr;
6635 
6636   /// The instruction that represents the current end of the transition.
6637   /// Since we are faking the promotion until we reach the end of the chain
6638   /// of computation, we need a way to get the current end of the transition.
6639   Instruction *getEndOfTransition() const {
6640     if (InstsToBePromoted.empty())
6641       return Transition;
6642     return InstsToBePromoted.back();
6643   }
6644 
6645   /// Return the index of the original value in the transition.
6646   /// E.g., for "extractelement <2 x i32> c, i32 1" the original value,
6647   /// c, is at index 0.
6648   unsigned getTransitionOriginalValueIdx() const {
6649     assert(isa<ExtractElementInst>(Transition) &&
6650            "Other kind of transitions are not supported yet");
6651     return 0;
6652   }
6653 
6654   /// Return the index of the index in the transition.
6655   /// E.g., for "extractelement <2 x i32> c, i32 0" the index
6656   /// is at index 1.
6657   unsigned getTransitionIdx() const {
6658     assert(isa<ExtractElementInst>(Transition) &&
6659            "Other kind of transitions are not supported yet");
6660     return 1;
6661   }
6662 
6663   /// Get the type of the transition.
6664   /// This is the type of the original value.
6665   /// E.g., for "extractelement <2 x i32> c, i32 1" the type of the
6666   /// transition is <2 x i32>.
6667   Type *getTransitionType() const {
6668     return Transition->getOperand(getTransitionOriginalValueIdx())->getType();
6669   }
6670 
  /// Promote \p ToBePromoted by moving \p Def downward through it.
6672   /// I.e., we have the following sequence:
6673   /// Def = Transition <ty1> a to <ty2>
6674   /// b = ToBePromoted <ty2> Def, ...
6675   /// =>
6676   /// b = ToBePromoted <ty1> a, ...
6677   /// Def = Transition <ty1> ToBePromoted to <ty2>
6678   void promoteImpl(Instruction *ToBePromoted);
6679 
6680   /// Check whether or not it is profitable to promote all the
6681   /// instructions enqueued to be promoted.
6682   bool isProfitableToPromote() {
6683     Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx());
6684     unsigned Index = isa<ConstantInt>(ValIdx)
6685                          ? cast<ConstantInt>(ValIdx)->getZExtValue()
6686                          : -1;
6687     Type *PromotedType = getTransitionType();
6688 
6689     StoreInst *ST = cast<StoreInst>(CombineInst);
6690     unsigned AS = ST->getPointerAddressSpace();
6691     unsigned Align = ST->getAlignment();
6692     // Check if this store is supported.
6693     if (!TLI.allowsMisalignedMemoryAccesses(
6694             TLI.getValueType(DL, ST->getValueOperand()->getType()), AS,
6695             Align)) {
6696       // If this is not supported, there is no way we can combine
6697       // the extract with the store.
6698       return false;
6699     }
6700 
    // The scalar chain of computation has to pay for the transition
    // from scalar to vector.
6703     // The vector chain has to account for the combining cost.
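    // Illustrative breakdown (not a target-specific formula): for a chain of
    // N promotable binary operations,
    //   ScalarCost = cost(extractelement) + sum of the N scalar op costs
    //   VectorCost = StoreExtractCombineCost + sum of the N vector op costs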
6704     uint64_t ScalarCost =
6705         TTI.getVectorInstrCost(Transition->getOpcode(), PromotedType, Index);
6706     uint64_t VectorCost = StoreExtractCombineCost;
6707     enum TargetTransformInfo::TargetCostKind CostKind =
6708       TargetTransformInfo::TCK_RecipThroughput;
6709     for (const auto &Inst : InstsToBePromoted) {
6710       // Compute the cost.
6711       // By construction, all instructions being promoted are arithmetic ones.
6712       // Moreover, one argument is a constant that can be viewed as a splat
6713       // constant.
6714       Value *Arg0 = Inst->getOperand(0);
6715       bool IsArg0Constant = isa<UndefValue>(Arg0) || isa<ConstantInt>(Arg0) ||
6716                             isa<ConstantFP>(Arg0);
6717       TargetTransformInfo::OperandValueKind Arg0OVK =
6718           IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue
6719                          : TargetTransformInfo::OK_AnyValue;
6720       TargetTransformInfo::OperandValueKind Arg1OVK =
6721           !IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue
6722                           : TargetTransformInfo::OK_AnyValue;
6723       ScalarCost += TTI.getArithmeticInstrCost(
6724           Inst->getOpcode(), Inst->getType(), CostKind, Arg0OVK, Arg1OVK);
6725       VectorCost += TTI.getArithmeticInstrCost(Inst->getOpcode(), PromotedType,
6726                                                CostKind,
6727                                                Arg0OVK, Arg1OVK);
6728     }
6729     LLVM_DEBUG(
6730         dbgs() << "Estimated cost of computation to be promoted:\nScalar: "
6731                << ScalarCost << "\nVector: " << VectorCost << '\n');
6732     return ScalarCost > VectorCost;
6733   }
6734 
6735   /// Generate a constant vector with \p Val with the same
6736   /// number of elements as the transition.
6737   /// \p UseSplat defines whether or not \p Val should be replicated
6738   /// across the whole vector.
6739   /// In other words, if UseSplat == true, we generate <Val, Val, ..., Val>,
  /// otherwise we generate a vector with as many undef elements as possible:
6741   /// <undef, ..., undef, Val, undef, ..., undef> where \p Val is only
6742   /// used at the index of the extract.
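  /// E.g. (illustrative), for Val == 7 on a <4 x i32> transition extracting
  /// index 2: with UseSplat we return <7, 7, 7, 7>, otherwise we return
  /// <undef, undef, 7, undef>.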
6743   Value *getConstantVector(Constant *Val, bool UseSplat) const {
6744     unsigned ExtractIdx = std::numeric_limits<unsigned>::max();
6745     if (!UseSplat) {
6746       // If we cannot determine where the constant must be, we have to
6747       // use a splat constant.
6748       Value *ValExtractIdx = Transition->getOperand(getTransitionIdx());
6749       if (ConstantInt *CstVal = dyn_cast<ConstantInt>(ValExtractIdx))
6750         ExtractIdx = CstVal->getSExtValue();
6751       else
6752         UseSplat = true;
6753     }
6754 
6755     ElementCount EC = cast<VectorType>(getTransitionType())->getElementCount();
6756     if (UseSplat)
6757       return ConstantVector::getSplat(EC, Val);
6758 
6759     if (!EC.Scalable) {
6760       SmallVector<Constant *, 4> ConstVec;
6761       UndefValue *UndefVal = UndefValue::get(Val->getType());
6762       for (unsigned Idx = 0; Idx != EC.Min; ++Idx) {
6763         if (Idx == ExtractIdx)
6764           ConstVec.push_back(Val);
6765         else
6766           ConstVec.push_back(UndefVal);
6767       }
6768       return ConstantVector::get(ConstVec);
6769     } else
6770       llvm_unreachable(
6771           "Generate scalable vector for non-splat is unimplemented");
6772   }
6773 
  /// Check if promoting the operand at \p OperandIdx of \p Use to a vector
  /// type can trigger undefined behavior.
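  /// E.g. (illustrative), vectorizing the divisor of "udiv i32 %a, %b" with
  /// undef padding in the unused lanes could introduce a division by
  /// undef/zero in those lanes.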
6776   static bool canCauseUndefinedBehavior(const Instruction *Use,
6777                                         unsigned OperandIdx) {
    // It is not safe to introduce undef when the operand is on
    // the right-hand side of a division-like instruction.
6780     if (OperandIdx != 1)
6781       return false;
6782     switch (Use->getOpcode()) {
6783     default:
6784       return false;
6785     case Instruction::SDiv:
6786     case Instruction::UDiv:
6787     case Instruction::SRem:
6788     case Instruction::URem:
6789       return true;
6790     case Instruction::FDiv:
6791     case Instruction::FRem:
6792       return !Use->hasNoNaNs();
6793     }
6794     llvm_unreachable(nullptr);
6795   }
6796 
6797 public:
6798   VectorPromoteHelper(const DataLayout &DL, const TargetLowering &TLI,
6799                       const TargetTransformInfo &TTI, Instruction *Transition,
6800                       unsigned CombineCost)
6801       : DL(DL), TLI(TLI), TTI(TTI), Transition(Transition),
6802         StoreExtractCombineCost(CombineCost) {
6803     assert(Transition && "Do not know how to promote null");
6804   }
6805 
  /// Check if we can promote \p ToBePromoted to a vector type.
6807   bool canPromote(const Instruction *ToBePromoted) const {
6808     // We could support CastInst too.
6809     return isa<BinaryOperator>(ToBePromoted);
6810   }
6811 
  /// Check if it is profitable to promote \p ToBePromoted
  /// by moving the transition downward through it.
6814   bool shouldPromote(const Instruction *ToBePromoted) const {
6815     // Promote only if all the operands can be statically expanded.
6816     // Indeed, we do not want to introduce any new kind of transitions.
6817     for (const Use &U : ToBePromoted->operands()) {
6818       const Value *Val = U.get();
6819       if (Val == getEndOfTransition()) {
6820         // If the use is a division and the transition is on the rhs,
6821         // we cannot promote the operation, otherwise we may create a
6822         // division by zero.
6823         if (canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()))
6824           return false;
6825         continue;
6826       }
6827       if (!isa<ConstantInt>(Val) && !isa<UndefValue>(Val) &&
6828           !isa<ConstantFP>(Val))
6829         return false;
6830     }
6831     // Check that the resulting operation is legal.
6832     int ISDOpcode = TLI.InstructionOpcodeToISD(ToBePromoted->getOpcode());
6833     if (!ISDOpcode)
6834       return false;
6835     return StressStoreExtract ||
6836            TLI.isOperationLegalOrCustom(
6837                ISDOpcode, TLI.getValueType(DL, getTransitionType(), true));
6838   }
6839 
6840   /// Check whether or not \p Use can be combined
6841   /// with the transition.
6842   /// I.e., is it possible to do Use(Transition) => AnotherUse?
6843   bool canCombine(const Instruction *Use) { return isa<StoreInst>(Use); }
6844 
6845   /// Record \p ToBePromoted as part of the chain to be promoted.
6846   void enqueueForPromotion(Instruction *ToBePromoted) {
6847     InstsToBePromoted.push_back(ToBePromoted);
6848   }
6849 
6850   /// Set the instruction that will be combined with the transition.
6851   void recordCombineInstruction(Instruction *ToBeCombined) {
6852     assert(canCombine(ToBeCombined) && "Unsupported instruction to combine");
6853     CombineInst = ToBeCombined;
6854   }
6855 
6856   /// Promote all the instructions enqueued for promotion if it is
  /// profitable.
6858   /// \return True if the promotion happened, false otherwise.
6859   bool promote() {
6860     // Check if there is something to promote.
6861     // Right now, if we do not have anything to combine with,
6862     // we assume the promotion is not profitable.
6863     if (InstsToBePromoted.empty() || !CombineInst)
6864       return false;
6865 
6866     // Check cost.
6867     if (!StressStoreExtract && !isProfitableToPromote())
6868       return false;
6869 
6870     // Promote.
6871     for (auto &ToBePromoted : InstsToBePromoted)
6872       promoteImpl(ToBePromoted);
6873     InstsToBePromoted.clear();
6874     return true;
6875   }
6876 };
6877 
6878 } // end anonymous namespace
6879 
6880 void VectorPromoteHelper::promoteImpl(Instruction *ToBePromoted) {
  // At this point, we know that all the operands of ToBePromoted except Def
  // can be statically promoted.
  // For Def, we need to use the transition's input operand in ToBePromoted:
6884   // b = ToBePromoted ty1 a
6885   // Def = Transition ty1 b to ty2
6886   // Move the transition down.
6887   // 1. Replace all uses of the promoted operation by the transition.
6888   // = ... b => = ... Def.
6889   assert(ToBePromoted->getType() == Transition->getType() &&
6890          "The type of the result of the transition does not match "
6891          "the final type");
6892   ToBePromoted->replaceAllUsesWith(Transition);
6893   // 2. Update the type of the uses.
6894   // b = ToBePromoted ty2 Def => b = ToBePromoted ty1 Def.
6895   Type *TransitionTy = getTransitionType();
6896   ToBePromoted->mutateType(TransitionTy);
6897   // 3. Update all the operands of the promoted operation with promoted
6898   // operands.
6899   // b = ToBePromoted ty1 Def => b = ToBePromoted ty1 a.
6900   for (Use &U : ToBePromoted->operands()) {
6901     Value *Val = U.get();
6902     Value *NewVal = nullptr;
6903     if (Val == Transition)
6904       NewVal = Transition->getOperand(getTransitionOriginalValueIdx());
6905     else if (isa<UndefValue>(Val) || isa<ConstantInt>(Val) ||
6906              isa<ConstantFP>(Val)) {
6907       // Use a splat constant if it is not safe to use undef.
6908       NewVal = getConstantVector(
6909           cast<Constant>(Val),
6910           isa<UndefValue>(Val) ||
6911               canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()));
6912     } else
6913       llvm_unreachable("Did you modified shouldPromote and forgot to update "
6914                        "this?");
6915     ToBePromoted->setOperand(U.getOperandNo(), NewVal);
6916   }
6917   Transition->moveAfter(ToBePromoted);
6918   Transition->setOperand(getTransitionOriginalValueIdx(), ToBePromoted);
6919 }
6920 
6921 /// Some targets can do store(extractelement) with one instruction.
6922 /// Try to push the extractelement towards the stores when the target
6923 /// has this feature and this is profitable.
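/// E.g. (illustrative IR), such targets can lower
///   %e = extractelement <4 x i32> %v, i32 0
///   store i32 %e, i32* %p
/// into a single lane-extracting store instruction.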
6924 bool CodeGenPrepare::optimizeExtractElementInst(Instruction *Inst) {
6925   unsigned CombineCost = std::numeric_limits<unsigned>::max();
6926   if (DisableStoreExtract ||
6927       (!StressStoreExtract &&
6928        !TLI->canCombineStoreAndExtract(Inst->getOperand(0)->getType(),
6929                                        Inst->getOperand(1), CombineCost)))
6930     return false;
6931 
6932   // At this point we know that Inst is a vector to scalar transition.
6933   // Try to move it down the def-use chain, until:
6934   // - We can combine the transition with its single use
6935   //   => we got rid of the transition.
6936   // - We escape the current basic block
6937   //   => we would need to check that we are moving it at a cheaper place and
6938   //      we do not do that for now.
6939   BasicBlock *Parent = Inst->getParent();
6940   LLVM_DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n');
6941   VectorPromoteHelper VPH(*DL, *TLI, *TTI, Inst, CombineCost);
6942   // If the transition has more than one use, assume this is not going to be
6943   // beneficial.
6944   while (Inst->hasOneUse()) {
6945     Instruction *ToBePromoted = cast<Instruction>(*Inst->user_begin());
6946     LLVM_DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n');
6947 
6948     if (ToBePromoted->getParent() != Parent) {
6949       LLVM_DEBUG(dbgs() << "Instruction to promote is in a different block ("
6950                         << ToBePromoted->getParent()->getName()
6951                         << ") than the transition (" << Parent->getName()
6952                         << ").\n");
6953       return false;
6954     }
6955 
6956     if (VPH.canCombine(ToBePromoted)) {
6957       LLVM_DEBUG(dbgs() << "Assume " << *Inst << '\n'
6958                         << "will be combined with: " << *ToBePromoted << '\n');
6959       VPH.recordCombineInstruction(ToBePromoted);
6960       bool Changed = VPH.promote();
6961       NumStoreExtractExposed += Changed;
6962       return Changed;
6963     }
6964 
6965     LLVM_DEBUG(dbgs() << "Try promoting.\n");
6966     if (!VPH.canPromote(ToBePromoted) || !VPH.shouldPromote(ToBePromoted))
6967       return false;
6968 
6969     LLVM_DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n");
6970 
6971     VPH.enqueueForPromotion(ToBePromoted);
6972     Inst = ToBePromoted;
6973   }
6974   return false;
6975 }
6976 
6977 /// For the instruction sequence of store below, F and I values
6978 /// are bundled together as an i64 value before being stored into memory.
6979 /// Sometimes it is more efficient to generate separate stores for F and I,
6980 /// which can remove the bitwise instructions or sink them to colder places.
6981 ///
6982 ///   (store (or (zext (bitcast F to i32) to i64),
6983 ///              (shl (zext I to i64), 32)), addr)  -->
6984 ///   (store F, addr) and (store I, addr+4)
6985 ///
/// Similarly, splitting other merged stores can also be beneficial, for example:
6987 /// For pair of {i32, i32}, i64 store --> two i32 stores.
6988 /// For pair of {i32, i16}, i64 store --> two i32 stores.
6989 /// For pair of {i16, i16}, i32 store --> two i16 stores.
6990 /// For pair of {i16, i8},  i32 store --> two i16 stores.
6991 /// For pair of {i8, i8},   i16 store --> two i8 stores.
6992 ///
6993 /// We allow each target to determine specifically which kind of splitting is
6994 /// supported.
6995 ///
/// The store patterns are commonly seen in the simple code snippet below
/// if only std::make_pair(...) is SROA-transformed before being inlined into hoo.
6998 ///   void goo(const std::pair<int, float> &);
6999 ///   hoo() {
7000 ///     ...
7001 ///     goo(std::make_pair(tmp, ftmp));
7002 ///     ...
7003 ///   }
7004 ///
/// Although we already have similar splitting in DAG Combine, we duplicate
/// it in CodeGenPrepare to catch the case in which the pattern spans
/// multiple BBs. The logic in DAG Combine is kept to catch cases generated
/// during code expansion.
7009 static bool splitMergedValStore(StoreInst &SI, const DataLayout &DL,
7010                                 const TargetLowering &TLI) {
7011   // Handle simple but common cases only.
7012   Type *StoreType = SI.getValueOperand()->getType();
7013 
7014   // The code below assumes shifting a value by <number of bits>,
7015   // whereas scalable vectors would have to be shifted by
7016   // <2log(vscale) + number of bits> in order to store the
7017   // low/high parts. Bailing out for now.
7018   if (isa<ScalableVectorType>(StoreType))
7019     return false;
7020 
7021   if (!DL.typeSizeEqualsStoreSize(StoreType) ||
7022       DL.getTypeSizeInBits(StoreType) == 0)
7023     return false;
7024 
7025   unsigned HalfValBitSize = DL.getTypeSizeInBits(StoreType) / 2;
7026   Type *SplitStoreType = Type::getIntNTy(SI.getContext(), HalfValBitSize);
7027   if (!DL.typeSizeEqualsStoreSize(SplitStoreType))
7028     return false;
7029 
7030   // Don't split the store if it is volatile.
7031   if (SI.isVolatile())
7032     return false;
7033 
  // Match the following patterns:
  // (store (or (zext LValue to i64),
  //            (shl (zext HValue to i64), HalfValBitSize)), addr)
  //  or
  // (store (or (shl (zext HValue to i64), HalfValBitSize),
  //            (zext LValue to i64)), addr)
  // Expect both operands of the OR and the first operand of the SHL to have
  // only one use.
7042   Value *LValue, *HValue;
7043   if (!match(SI.getValueOperand(),
7044              m_c_Or(m_OneUse(m_ZExt(m_Value(LValue))),
7045                     m_OneUse(m_Shl(m_OneUse(m_ZExt(m_Value(HValue))),
7046                                    m_SpecificInt(HalfValBitSize))))))
7047     return false;
7048 
  // Check that LValue and HValue are integers no wider than HalfValBitSize.
7050   if (!LValue->getType()->isIntegerTy() ||
7051       DL.getTypeSizeInBits(LValue->getType()) > HalfValBitSize ||
7052       !HValue->getType()->isIntegerTy() ||
7053       DL.getTypeSizeInBits(HValue->getType()) > HalfValBitSize)
7054     return false;
7055 
  // If LValue/HValue is a bitcast instruction, use the EVT of the value before
  // the bitcast as the input to the target query.
7058   auto *LBC = dyn_cast<BitCastInst>(LValue);
7059   auto *HBC = dyn_cast<BitCastInst>(HValue);
7060   EVT LowTy = LBC ? EVT::getEVT(LBC->getOperand(0)->getType())
7061                   : EVT::getEVT(LValue->getType());
7062   EVT HighTy = HBC ? EVT::getEVT(HBC->getOperand(0)->getType())
7063                    : EVT::getEVT(HValue->getType());
7064   if (!ForceSplitStore && !TLI.isMultiStoresCheaperThanBitsMerge(LowTy, HighTy))
7065     return false;
7066 
7067   // Start to split store.
7068   IRBuilder<> Builder(SI.getContext());
7069   Builder.SetInsertPoint(&SI);
7070 
  // If LValue/HValue is a bitcast in another BB, create a new one in the
  // current BB so it may be merged with the split stores by the DAG combiner.
7073   if (LBC && LBC->getParent() != SI.getParent())
7074     LValue = Builder.CreateBitCast(LBC->getOperand(0), LBC->getType());
7075   if (HBC && HBC->getParent() != SI.getParent())
7076     HValue = Builder.CreateBitCast(HBC->getOperand(0), HBC->getType());
7077 
7078   bool IsLE = SI.getModule()->getDataLayout().isLittleEndian();
7079   auto CreateSplitStore = [&](Value *V, bool Upper) {
7080     V = Builder.CreateZExtOrBitCast(V, SplitStoreType);
7081     Value *Addr = Builder.CreateBitCast(
7082         SI.getOperand(1),
7083         SplitStoreType->getPointerTo(SI.getPointerAddressSpace()));
7084     Align Alignment = SI.getAlign();
7085     const bool IsOffsetStore = (IsLE && Upper) || (!IsLE && !Upper);
7086     if (IsOffsetStore) {
7087       Addr = Builder.CreateGEP(
7088           SplitStoreType, Addr,
7089           ConstantInt::get(Type::getInt32Ty(SI.getContext()), 1));
7090 
7091       // When splitting the store in half, naturally one half will retain the
7092       // alignment of the original wider store, regardless of whether it was
7093       // over-aligned or not, while the other will require adjustment.
7094       Alignment = commonAlignment(Alignment, HalfValBitSize / 8);
7095     }
7096     Builder.CreateAlignedStore(V, Addr, Alignment);
7097   };
7098 
7099   CreateSplitStore(LValue, false);
7100   CreateSplitStore(HValue, true);
7101 
7102   // Delete the old store.
7103   SI.eraseFromParent();
7104   return true;
7105 }
7106 
7107 // Return true if the GEP has two operands, the first operand is of a sequential
7108 // type, and the second operand is a constant.
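// E.g. (illustrative), "getelementptr i32, i32* %p, i64 4" qualifies, whereas
// a two-operand GEP whose index is not a constant does not.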
7109 static bool GEPSequentialConstIndexed(GetElementPtrInst *GEP) {
7110   gep_type_iterator I = gep_type_begin(*GEP);
7111   return GEP->getNumOperands() == 2 &&
7112       I.isSequential() &&
7113       isa<ConstantInt>(GEP->getOperand(1));
7114 }
7115 
7116 // Try unmerging GEPs to reduce liveness interference (register pressure) across
7117 // IndirectBr edges. Since IndirectBr edges tend to touch on many blocks,
7118 // reducing liveness interference across those edges benefits global register
7119 // allocation. Currently handles only certain cases.
7120 //
7121 // For example, unmerge %GEPI and %UGEPI as below.
7122 //
7123 // ---------- BEFORE ----------
7124 // SrcBlock:
7125 //   ...
7126 //   %GEPIOp = ...
7127 //   ...
7128 //   %GEPI = gep %GEPIOp, Idx
7129 //   ...
7130 //   indirectbr ... [ label %DstB0, label %DstB1, ... label %DstBi ... ]
7131 //   (* %GEPI is alive on the indirectbr edges due to other uses ahead)
//   (* %GEPIOp is alive on the indirectbr edges only because it's used by
7133 //   %UGEPI)
7134 //
7135 // DstB0: ... (there may be a gep similar to %UGEPI to be unmerged)
7136 // DstB1: ... (there may be a gep similar to %UGEPI to be unmerged)
7137 // ...
7138 //
7139 // DstBi:
7140 //   ...
7141 //   %UGEPI = gep %GEPIOp, UIdx
7142 // ...
7143 // ---------------------------
7144 //
7145 // ---------- AFTER ----------
7146 // SrcBlock:
7147 //   ... (same as above)
7148 //    (* %GEPI is still alive on the indirectbr edges)
7149 //    (* %GEPIOp is no longer alive on the indirectbr edges as a result of the
7150 //    unmerging)
7151 // ...
7152 //
7153 // DstBi:
7154 //   ...
7155 //   %UGEPI = gep %GEPI, (UIdx-Idx)
7156 //   ...
7157 // ---------------------------
7158 //
7159 // The register pressure on the IndirectBr edges is reduced because %GEPIOp is
7160 // no longer alive on them.
7161 //
// We try to unmerge GEPs here in CodeGenPrepare, as opposed to limiting merging
// of GEPs in the first place in InstCombiner::visitGetElementPtrInst(), so as
// not to disable further simplifications and optimizations as a result of GEP
7165 // merging.
7166 //
7167 // Note this unmerging may increase the length of the data flow critical path
7168 // (the path from %GEPIOp to %UGEPI would go through %GEPI), which is a tradeoff
7169 // between the register pressure and the length of data-flow critical
7170 // path. Restricting this to the uncommon IndirectBr case would minimize the
7171 // impact of potentially longer critical path, if any, and the impact on compile
7172 // time.
7173 static bool tryUnmergingGEPsAcrossIndirectBr(GetElementPtrInst *GEPI,
7174                                              const TargetTransformInfo *TTI) {
7175   BasicBlock *SrcBlock = GEPI->getParent();
7176   // Check that SrcBlock ends with an IndirectBr. If not, give up. The common
7177   // (non-IndirectBr) cases exit early here.
7178   if (!isa<IndirectBrInst>(SrcBlock->getTerminator()))
7179     return false;
7180   // Check that GEPI is a simple gep with a single constant index.
7181   if (!GEPSequentialConstIndexed(GEPI))
7182     return false;
7183   ConstantInt *GEPIIdx = cast<ConstantInt>(GEPI->getOperand(1));
7184   // Check that GEPI is a cheap one.
7185   if (TTI->getIntImmCost(GEPIIdx->getValue(), GEPIIdx->getType(),
7186                          TargetTransformInfo::TCK_SizeAndLatency)
7187       > TargetTransformInfo::TCC_Basic)
7188     return false;
7189   Value *GEPIOp = GEPI->getOperand(0);
7190   // Check that GEPIOp is an instruction that's also defined in SrcBlock.
7191   if (!isa<Instruction>(GEPIOp))
7192     return false;
7193   auto *GEPIOpI = cast<Instruction>(GEPIOp);
7194   if (GEPIOpI->getParent() != SrcBlock)
7195     return false;
7196   // Check that GEP is used outside the block, meaning it's alive on the
7197   // IndirectBr edge(s).
7198   if (find_if(GEPI->users(), [&](User *Usr) {
7199         if (auto *I = dyn_cast<Instruction>(Usr)) {
7200           if (I->getParent() != SrcBlock) {
7201             return true;
7202           }
7203         }
7204         return false;
7205       }) == GEPI->users().end())
7206     return false;
7207   // The second elements of the GEP chains to be unmerged.
7208   std::vector<GetElementPtrInst *> UGEPIs;
7209   // Check each user of GEPIOp to check if unmerging would make GEPIOp not alive
7210   // on IndirectBr edges.
7211   for (User *Usr : GEPIOp->users()) {
7212     if (Usr == GEPI) continue;
7213     // Check if Usr is an Instruction. If not, give up.
7214     if (!isa<Instruction>(Usr))
7215       return false;
7216     auto *UI = cast<Instruction>(Usr);
    // If Usr is in the same block as GEPIOp, that's fine; skip it.
7218     if (UI->getParent() == SrcBlock)
7219       continue;
7220     // Check if Usr is a GEP. If not, give up.
7221     if (!isa<GetElementPtrInst>(Usr))
7222       return false;
7223     auto *UGEPI = cast<GetElementPtrInst>(Usr);
7224     // Check if UGEPI is a simple gep with a single constant index and GEPIOp is
7225     // the pointer operand to it. If so, record it in the vector. If not, give
7226     // up.
7227     if (!GEPSequentialConstIndexed(UGEPI))
7228       return false;
7229     if (UGEPI->getOperand(0) != GEPIOp)
7230       return false;
7231     if (GEPIIdx->getType() !=
7232         cast<ConstantInt>(UGEPI->getOperand(1))->getType())
7233       return false;
7234     ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
7235     if (TTI->getIntImmCost(UGEPIIdx->getValue(), UGEPIIdx->getType(),
7236                            TargetTransformInfo::TCK_SizeAndLatency)
7237         > TargetTransformInfo::TCC_Basic)
7238       return false;
7239     UGEPIs.push_back(UGEPI);
7240   }
7241   if (UGEPIs.size() == 0)
7242     return false;
  // Check the materialization cost of (UIdx-Idx).
7244   for (GetElementPtrInst *UGEPI : UGEPIs) {
7245     ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
7246     APInt NewIdx = UGEPIIdx->getValue() - GEPIIdx->getValue();
7247     unsigned ImmCost =
7248       TTI->getIntImmCost(NewIdx, GEPIIdx->getType(),
7249                          TargetTransformInfo::TCK_SizeAndLatency);
7250     if (ImmCost > TargetTransformInfo::TCC_Basic)
7251       return false;
7252   }
7253   // Now unmerge between GEPI and UGEPIs.
7254   for (GetElementPtrInst *UGEPI : UGEPIs) {
7255     UGEPI->setOperand(0, GEPI);
7256     ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
7257     Constant *NewUGEPIIdx =
7258         ConstantInt::get(GEPIIdx->getType(),
7259                          UGEPIIdx->getValue() - GEPIIdx->getValue());
7260     UGEPI->setOperand(1, NewUGEPIIdx);
7261     // If GEPI is not inbounds but UGEPI is inbounds, change UGEPI to not
7262     // inbounds to avoid UB.
7263     if (!GEPI->isInBounds()) {
7264       UGEPI->setIsInBounds(false);
7265     }
7266   }
7267   // After unmerging, verify that GEPIOp is actually only used in SrcBlock (not
7268   // alive on IndirectBr edges).
7269   assert(find_if(GEPIOp->users(), [&](User *Usr) {
7270         return cast<Instruction>(Usr)->getParent() != SrcBlock;
7271       }) == GEPIOp->users().end() && "GEPIOp is used outside SrcBlock");
7272   return true;
7273 }
7274 
7275 bool CodeGenPrepare::optimizeInst(Instruction *I, bool &ModifiedDT) {
7276   // Bail out if we inserted the instruction to prevent optimizations from
7277   // stepping on each other's toes.
7278   if (InsertedInsts.count(I))
7279     return false;
7280 
7281   // TODO: Move into the switch on opcode below here.
7282   if (PHINode *P = dyn_cast<PHINode>(I)) {
7283     // It is possible for very late stage optimizations (such as SimplifyCFG)
7284     // to introduce PHI nodes too late to be cleaned up.  If we detect such a
7285     // trivial PHI, go ahead and zap it here.
7286     if (Value *V = SimplifyInstruction(P, {*DL, TLInfo})) {
7287       LargeOffsetGEPMap.erase(P);
7288       P->replaceAllUsesWith(V);
7289       P->eraseFromParent();
7290       ++NumPHIsElim;
7291       return true;
7292     }
7293     return false;
7294   }
7295 
7296   if (CastInst *CI = dyn_cast<CastInst>(I)) {
7297     // If the source of the cast is a constant, then this should have
7298     // already been constant folded.  The only reason NOT to constant fold
7299     // it is if something (e.g. LSR) was careful to place the constant
    // evaluation in a block other than the one that uses it (e.g. to hoist
7301     // the address of globals out of a loop).  If this is the case, we don't
7302     // want to forward-subst the cast.
7303     if (isa<Constant>(CI->getOperand(0)))
7304       return false;
7305 
7306     if (OptimizeNoopCopyExpression(CI, *TLI, *DL))
7307       return true;
7308 
7309     if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
7310       /// Sink a zext or sext into its user blocks if the target type doesn't
7311       /// fit in one register
7312       if (TLI->getTypeAction(CI->getContext(),
7313                              TLI->getValueType(*DL, CI->getType())) ==
7314           TargetLowering::TypeExpandInteger) {
7315         return SinkCast(CI);
7316       } else {
7317         bool MadeChange = optimizeExt(I);
7318         return MadeChange | optimizeExtUses(I);
7319       }
7320     }
7321     return false;
7322   }
7323 
7324   if (auto *Cmp = dyn_cast<CmpInst>(I))
7325     if (optimizeCmp(Cmp, ModifiedDT))
7326       return true;
7327 
7328   if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
7329     LI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
7330     bool Modified = optimizeLoadExt(LI);
7331     unsigned AS = LI->getPointerAddressSpace();
7332     Modified |= optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS);
7333     return Modified;
7334   }
7335 
7336   if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
7337     if (splitMergedValStore(*SI, *DL, *TLI))
7338       return true;
7339     SI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
7340     unsigned AS = SI->getPointerAddressSpace();
7341     return optimizeMemoryInst(I, SI->getOperand(1),
7342                               SI->getOperand(0)->getType(), AS);
7343   }
7344 
7345   if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
7346       unsigned AS = RMW->getPointerAddressSpace();
7347       return optimizeMemoryInst(I, RMW->getPointerOperand(),
7348                                 RMW->getType(), AS);
7349   }
7350 
7351   if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(I)) {
7352       unsigned AS = CmpX->getPointerAddressSpace();
7353       return optimizeMemoryInst(I, CmpX->getPointerOperand(),
7354                                 CmpX->getCompareOperand()->getType(), AS);
7355   }
7356 
7357   BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I);
7358 
7359   if (BinOp && (BinOp->getOpcode() == Instruction::And) && EnableAndCmpSinking)
7360     return sinkAndCmp0Expression(BinOp, *TLI, InsertedInsts);
7361 
7362   // TODO: Move this into the switch on opcode - it handles shifts already.
7363   if (BinOp && (BinOp->getOpcode() == Instruction::AShr ||
7364                 BinOp->getOpcode() == Instruction::LShr)) {
7365     ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1));
7366     if (CI && TLI->hasExtractBitsInsn())
7367       if (OptimizeExtractBits(BinOp, CI, *TLI, *DL))
7368         return true;
7369   }
7370 
7371   if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
7372     if (GEPI->hasAllZeroIndices()) {
      // The GEP operand must be a pointer, so its result must be too -> BitCast.
7374       Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
7375                                         GEPI->getName(), GEPI);
7376       NC->setDebugLoc(GEPI->getDebugLoc());
7377       GEPI->replaceAllUsesWith(NC);
7378       GEPI->eraseFromParent();
7379       ++NumGEPsElim;
7380       optimizeInst(NC, ModifiedDT);
7381       return true;
7382     }
7383     if (tryUnmergingGEPsAcrossIndirectBr(GEPI, TTI)) {
7384       return true;
7385     }
7386     return false;
7387   }
7388 
7389   if (FreezeInst *FI = dyn_cast<FreezeInst>(I)) {
    // freeze(icmp a, const) -> icmp (freeze a), const
7391     // This helps generate efficient conditional jumps.
7392     Instruction *CmpI = nullptr;
7393     if (ICmpInst *II = dyn_cast<ICmpInst>(FI->getOperand(0)))
7394       CmpI = II;
7395     else if (FCmpInst *F = dyn_cast<FCmpInst>(FI->getOperand(0)))
7396       CmpI = F->getFastMathFlags().none() ? F : nullptr;
7397 
7398     if (CmpI && CmpI->hasOneUse()) {
7399       auto Op0 = CmpI->getOperand(0), Op1 = CmpI->getOperand(1);
7400       bool Const0 = isa<ConstantInt>(Op0) || isa<ConstantFP>(Op0) ||
7401                     isa<ConstantPointerNull>(Op0);
7402       bool Const1 = isa<ConstantInt>(Op1) || isa<ConstantFP>(Op1) ||
7403                     isa<ConstantPointerNull>(Op1);
7404       if (Const0 || Const1) {
7405         if (!Const0 || !Const1) {
7406           auto *F = new FreezeInst(Const0 ? Op1 : Op0, "", CmpI);
7407           F->takeName(FI);
7408           CmpI->setOperand(Const0 ? 1 : 0, F);
7409         }
7410         FI->replaceAllUsesWith(CmpI);
7411         FI->eraseFromParent();
7412         return true;
7413       }
7414     }
7415     return false;
7416   }
7417 
7418   if (tryToSinkFreeOperands(I))
7419     return true;
7420 
7421   switch (I->getOpcode()) {
7422   case Instruction::Shl:
7423   case Instruction::LShr:
7424   case Instruction::AShr:
7425     return optimizeShiftInst(cast<BinaryOperator>(I));
7426   case Instruction::Call:
7427     return optimizeCallInst(cast<CallInst>(I), ModifiedDT);
7428   case Instruction::Select:
7429     return optimizeSelectInst(cast<SelectInst>(I));
7430   case Instruction::ShuffleVector:
7431     return optimizeShuffleVectorInst(cast<ShuffleVectorInst>(I));
7432   case Instruction::Switch:
7433     return optimizeSwitchInst(cast<SwitchInst>(I));
7434   case Instruction::ExtractElement:
7435     return optimizeExtractElementInst(cast<ExtractElementInst>(I));
7436   }
7437 
7438   return false;
7439 }
7440 
7441 /// Given an OR instruction, check to see if this is a bitreverse
7442 /// idiom. If so, insert the new intrinsic and return true.
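/// E.g. (illustrative), a chain of shifts/ands/ors that reverses the bits of
/// an i32 value %x is collapsed into a single intrinsic call:
///   %rev = call i32 @llvm.bitreverse.i32(i32 %x)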
7443 static bool makeBitReverse(Instruction &I, const DataLayout &DL,
7444                            const TargetLowering &TLI) {
7445   if (!I.getType()->isIntegerTy() ||
7446       !TLI.isOperationLegalOrCustom(ISD::BITREVERSE,
7447                                     TLI.getValueType(DL, I.getType(), true)))
7448     return false;
7449 
7450   SmallVector<Instruction*, 4> Insts;
7451   if (!recognizeBSwapOrBitReverseIdiom(&I, false, true, Insts))
7452     return false;
7453   Instruction *LastInst = Insts.back();
7454   I.replaceAllUsesWith(LastInst);
7455   RecursivelyDeleteTriviallyDeadInstructions(&I);
7456   return true;
7457 }
7458 
7459 // In this pass we look for GEP and cast instructions that are used
7460 // across basic blocks and rewrite them to improve basic-block-at-a-time
7461 // selection.
7462 bool CodeGenPrepare::optimizeBlock(BasicBlock &BB, bool &ModifiedDT) {
7463   SunkAddrs.clear();
7464   bool MadeChange = false;
7465 
7466   CurInstIterator = BB.begin();
7467   while (CurInstIterator != BB.end()) {
7468     MadeChange |= optimizeInst(&*CurInstIterator++, ModifiedDT);
7469     if (ModifiedDT)
7470       return true;
7471   }
7472 
7473   bool MadeBitReverse = true;
7474   while (MadeBitReverse) {
7475     MadeBitReverse = false;
7476     for (auto &I : reverse(BB)) {
7477       if (makeBitReverse(I, *DL, *TLI)) {
7478         MadeBitReverse = MadeChange = true;
7479         break;
7480       }
7481     }
7482   }
7483   MadeChange |= dupRetToEnableTailCallOpts(&BB, ModifiedDT);
7484 
7485   return MadeChange;
7486 }
7487 
7488 // Some CGP optimizations may move or alter what's computed in a block. Check
7489 // whether a dbg.value intrinsic could be pointed at a more appropriate operand.
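// E.g. (illustrative), if the address %addr referenced by a dbg.value was
// rematerialized in this block as %sunkaddr by address-mode sinking, the
// intrinsic is rewritten to refer to %sunkaddr instead:
//   call void @llvm.dbg.value(metadata i32* %sunkaddr, ...)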
7490 bool CodeGenPrepare::fixupDbgValue(Instruction *I) {
7491   assert(isa<DbgValueInst>(I));
7492   DbgValueInst &DVI = *cast<DbgValueInst>(I);
7493 
7494   // Does this dbg.value refer to a sunk address calculation?
7495   Value *Location = DVI.getVariableLocation();
7496   WeakTrackingVH SunkAddrVH = SunkAddrs[Location];
7497   Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
7498   if (SunkAddr) {
7499     // Point dbg.value at locally computed address, which should give the best
7500     // opportunity to be accurately lowered. This update may change the type of
7501     // pointer being referred to; however this makes no difference to debugging
7502     // information, and we can't generate bitcasts that may affect codegen.
7503     DVI.setOperand(0, MetadataAsValue::get(DVI.getContext(),
7504                                            ValueAsMetadata::get(SunkAddr)));
7505     return true;
7506   }
7507   return false;
7508 }
7509 
7510 // A llvm.dbg.value may be using a value before its definition, due to
7511 // optimizations in this pass and others. Scan for such dbg.values, and rescue
7512 // them by moving the dbg.value to immediately after the value definition.
7513 // FIXME: Ideally this should never be necessary, and this has the potential
7514 // to re-order dbg.value intrinsics.
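// E.g. (illustrative), the dbg.value below refers to %x before its definition
// and is moved to just after it:
//   call void @llvm.dbg.value(metadata i32 %x, ...)
//   %x = add i32 %a, %b
// becomes
//   %x = add i32 %a, %b
//   call void @llvm.dbg.value(metadata i32 %x, ...)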
7515 bool CodeGenPrepare::placeDbgValues(Function &F) {
7516   bool MadeChange = false;
7517   DominatorTree DT(F);
7518 
7519   for (BasicBlock &BB : F) {
7520     for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
7521       Instruction *Insn = &*BI++;
7522       DbgValueInst *DVI = dyn_cast<DbgValueInst>(Insn);
7523       if (!DVI)
7524         continue;
7525 
7526       Instruction *VI = dyn_cast_or_null<Instruction>(DVI->getValue());
7527 
7528       if (!VI || VI->isTerminator())
7529         continue;
7530 
7531       // If VI is a phi in a block with an EHPad terminator, we can't insert
7532       // after it.
7533       if (isa<PHINode>(VI) && VI->getParent()->getTerminator()->isEHPad())
7534         continue;
7535 
7536       // If the defining instruction dominates the dbg.value, we do not need
7537       // to move the dbg.value.
7538       if (DT.dominates(VI, DVI))
7539         continue;
7540 
7541       LLVM_DEBUG(dbgs() << "Moving Debug Value before :\n"
7542                         << *DVI << ' ' << *VI);
7543       DVI->removeFromParent();
7544       if (isa<PHINode>(VI))
7545         DVI->insertBefore(&*VI->getParent()->getFirstInsertionPt());
7546       else
7547         DVI->insertAfter(VI);
7548       MadeChange = true;
7549       ++NumDbgValueMoved;
7550     }
7551   }
7552   return MadeChange;
7553 }
7554 
7555 /// Scale down both weights to fit into uint32_t.
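/// E.g. (illustrative), NewTrue = 6,000,000,000 and NewFalse = 3,000,000,000
/// give Scale = 2, yielding 3,000,000,000 and 1,500,000,000, both of which now
/// fit into uint32_t.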
7556 static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
7557   uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse;
7558   uint32_t Scale = (NewMax / std::numeric_limits<uint32_t>::max()) + 1;
7559   NewTrue = NewTrue / Scale;
7560   NewFalse = NewFalse / Scale;
7561 }
7562 
7563 /// Some targets prefer to split a conditional branch like:
7564 /// \code
7565 ///   %0 = icmp ne i32 %a, 0
7566 ///   %1 = icmp ne i32 %b, 0
7567 ///   %or.cond = or i1 %0, %1
7568 ///   br i1 %or.cond, label %TrueBB, label %FalseBB
7569 /// \endcode
7570 /// into multiple branch instructions like:
7571 /// \code
7572 ///   bb1:
7573 ///     %0 = icmp ne i32 %a, 0
7574 ///     br i1 %0, label %TrueBB, label %bb2
7575 ///   bb2:
7576 ///     %1 = icmp ne i32 %b, 0
7577 ///     br i1 %1, label %TrueBB, label %FalseBB
7578 /// \endcode
7579 /// This usually allows instruction selection to do even further optimizations
7580 /// and combine the compare with the branch instruction. Currently this is
7581 /// applied for targets which have "cheap" jump instructions.
7582 ///
7583 /// FIXME: Remove the (equivalent?) implementation in SelectionDAG.
7584 ///
7585 bool CodeGenPrepare::splitBranchCondition(Function &F, bool &ModifiedDT) {
7586   if (!TM->Options.EnableFastISel || TLI->isJumpExpensive())
7587     return false;
7588 
7589   bool MadeChange = false;
7590   for (auto &BB : F) {
7591     // Does this BB end with the following?
7592     //   %cond1 = icmp|fcmp|binary instruction ...
7593     //   %cond2 = icmp|fcmp|binary instruction ...
    //   %cond.or = or|and i1 %cond1, %cond2
    //   br i1 %cond.or, label %dest1, label %dest2
7596     BinaryOperator *LogicOp;
7597     BasicBlock *TBB, *FBB;
7598     if (!match(BB.getTerminator(), m_Br(m_OneUse(m_BinOp(LogicOp)), TBB, FBB)))
7599       continue;
7600 
7601     auto *Br1 = cast<BranchInst>(BB.getTerminator());
7602     if (Br1->getMetadata(LLVMContext::MD_unpredictable))
7603       continue;
7604 
    // The merging of mostly empty BBs can cause a degenerate branch.
7606     if (TBB == FBB)
7607       continue;
7608 
7609     unsigned Opc;
7610     Value *Cond1, *Cond2;
7611     if (match(LogicOp, m_And(m_OneUse(m_Value(Cond1)),
7612                              m_OneUse(m_Value(Cond2)))))
7613       Opc = Instruction::And;
7614     else if (match(LogicOp, m_Or(m_OneUse(m_Value(Cond1)),
7615                                  m_OneUse(m_Value(Cond2)))))
7616       Opc = Instruction::Or;
7617     else
7618       continue;
7619 
7620     if (!match(Cond1, m_CombineOr(m_Cmp(), m_BinOp())) ||
7621         !match(Cond2, m_CombineOr(m_Cmp(), m_BinOp()))   )
7622       continue;
7623 
7624     LLVM_DEBUG(dbgs() << "Before branch condition splitting\n"; BB.dump());
7625 
7626     // Create a new BB.
7627     auto *TmpBB =
7628         BasicBlock::Create(BB.getContext(), BB.getName() + ".cond.split",
7629                            BB.getParent(), BB.getNextNode());
7630 
    // Update the original basic block to use the first condition directly in
    // the branch instruction, and remove the no-longer-needed and/or
    // instruction.
7633     Br1->setCondition(Cond1);
7634     LogicOp->eraseFromParent();
7635 
7636     // Depending on the condition we have to either replace the true or the
7637     // false successor of the original branch instruction.
7638     if (Opc == Instruction::And)
7639       Br1->setSuccessor(0, TmpBB);
7640     else
7641       Br1->setSuccessor(1, TmpBB);
7642 
7643     // Fill in the new basic block.
7644     auto *Br2 = IRBuilder<>(TmpBB).CreateCondBr(Cond2, TBB, FBB);
7645     if (auto *I = dyn_cast<Instruction>(Cond2)) {
7646       I->removeFromParent();
7647       I->insertBefore(Br2);
7648     }
7649 
7650     // Update PHI nodes in both successors. The original BB needs to be
    // replaced in one successor's PHI nodes, because the branch now comes from
    // the newly generated BB (TmpBB). In the other successor we need to add one
    // incoming edge to the PHI nodes, because both branch instructions now
    // target the same successor. Depending on the original branch condition
    // (and/or) we have to swap the successors (TBB, FBB), so that we perform
    // the correct update for the PHI nodes.
7657     // This doesn't change the successor order of the just created branch
7658     // instruction (or any other instruction).
7659     if (Opc == Instruction::Or)
7660       std::swap(TBB, FBB);
7661 
7662     // Replace the old BB with the new BB.
7663     TBB->replacePhiUsesWith(&BB, TmpBB);
7664 
    // Add another incoming edge from the new BB.
7666     for (PHINode &PN : FBB->phis()) {
7667       auto *Val = PN.getIncomingValueForBlock(&BB);
7668       PN.addIncoming(Val, TmpBB);
7669     }
7670 
7671     // Update the branch weights (from SelectionDAGBuilder::
7672     // FindMergedConditions).
7673     if (Opc == Instruction::Or) {
7674       // Codegen X | Y as:
7675       // BB1:
7676       //   jmp_if_X TBB
7677       //   jmp TmpBB
7678       // TmpBB:
7679       //   jmp_if_Y TBB
7680       //   jmp FBB
7681       //
7682 
      // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
7684       // The requirement is that
7685       //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
7686       //     = TrueProb for original BB.
7687       // Assuming the original weights are A and B, one choice is to set BB1's
7688       // weights to A and A+2B, and set TmpBB's weights to A and 2B. This choice
7689       // assumes that
7690       //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
7691       // Another choice is to assume TrueProb for BB1 equals to TrueProb for
7692       // TmpBB, but the math is more complicated.
7693       uint64_t TrueWeight, FalseWeight;
7694       if (Br1->extractProfMetadata(TrueWeight, FalseWeight)) {
7695         uint64_t NewTrueWeight = TrueWeight;
7696         uint64_t NewFalseWeight = TrueWeight + 2 * FalseWeight;
7697         scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof, MDBuilder(Br1->getContext())
                         .createBranchWeights(NewTrueWeight, NewFalseWeight));
7700 
7701         NewTrueWeight = TrueWeight;
7702         NewFalseWeight = 2 * FalseWeight;
7703         scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof, MDBuilder(Br2->getContext())
                         .createBranchWeights(NewTrueWeight, NewFalseWeight));
7706       }
7707     } else {
7708       // Codegen X & Y as:
7709       // BB1:
7710       //   jmp_if_X TmpBB
7711       //   jmp FBB
7712       // TmpBB:
7713       //   jmp_if_Y TBB
7714       //   jmp FBB
7715       //
7716       //  This requires creation of TmpBB after CurBB.
7717 
7718       // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
7719       // The requirement is that
7720       //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
7721       //     = FalseProb for original BB.
7722       // Assuming the original weights are A and B, one choice is to set BB1's
7723       // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This choice
7724       // assumes that
7725       //   FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.
7726       uint64_t TrueWeight, FalseWeight;
7727       if (Br1->extractProfMetadata(TrueWeight, FalseWeight)) {
7728         uint64_t NewTrueWeight = 2 * TrueWeight + FalseWeight;
7729         uint64_t NewFalseWeight = FalseWeight;
7730         scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof, MDBuilder(Br1->getContext())
                         .createBranchWeights(NewTrueWeight, NewFalseWeight));
7733 
7734         NewTrueWeight = 2 * TrueWeight;
7735         NewFalseWeight = FalseWeight;
7736         scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof, MDBuilder(Br2->getContext())
                         .createBranchWeights(NewTrueWeight, NewFalseWeight));
7739       }
7740     }
7741 
7742     ModifiedDT = true;
7743     MadeChange = true;
7744 
7745     LLVM_DEBUG(dbgs() << "After branch condition splitting\n"; BB.dump();
7746                TmpBB->dump());
7747   }
7748   return MadeChange;
7749 }
7750