//===-- AMDGPUCodeGenPrepare.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass does misc. AMDGPU optimizations on IR before instruction
/// selection.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUTargetMachine.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/Utils/IntegerDivision.h"

#define DEBUG_TYPE "amdgpu-codegenprepare"

using namespace llvm;

namespace {

static cl::opt<bool> WidenLoads(
  "amdgpu-codegenprepare-widen-constant-loads",
  cl::desc("Widen sub-dword constant address space loads in AMDGPUCodeGenPrepare"),
  cl::ReallyHidden,
  cl::init(false));

static cl::opt<bool> Widen16BitOps(
  "amdgpu-codegenprepare-widen-16-bit-ops",
  cl::desc("Widen uniform 16-bit instructions to 32-bit in AMDGPUCodeGenPrepare"),
  cl::ReallyHidden,
  cl::init(true));

static cl::opt<bool> UseMul24Intrin(
  "amdgpu-codegenprepare-mul24",
  cl::desc("Introduce mul24 intrinsics in AMDGPUCodeGenPrepare"),
  cl::ReallyHidden,
  cl::init(true));

// Legalize 64-bit division by using the generic IR expansion.
static cl::opt<bool> ExpandDiv64InIR(
  "amdgpu-codegenprepare-expand-div64",
  cl::desc("Expand 64-bit division in AMDGPUCodeGenPrepare"),
  cl::ReallyHidden,
  cl::init(false));

// Leave all division operations as they are. This supersedes ExpandDiv64InIR
// and is used for testing the legalizer.
static cl::opt<bool> DisableIDivExpand(
  "amdgpu-codegenprepare-disable-idiv-expansion",
  cl::desc("Prevent expanding integer division in AMDGPUCodeGenPrepare"),
  cl::ReallyHidden,
  cl::init(false));

class AMDGPUCodeGenPrepare : public FunctionPass,
                             public InstVisitor<AMDGPUCodeGenPrepare, bool> {
  const GCNSubtarget *ST = nullptr;
  AssumptionCache *AC = nullptr;
  DominatorTree *DT = nullptr;
  LegacyDivergenceAnalysis *DA = nullptr;
  Module *Mod = nullptr;
  const DataLayout *DL = nullptr;
  bool HasUnsafeFPMath = false;
  bool HasFP32Denormals = false;

  /// \returns \p T's base element bit width.
  unsigned getBaseElementBitWidth(const Type *T) const;

  /// \returns Equivalent 32 bit integer type for given type \p T. For example,
  /// if \p T is i7, then i32 is returned; if \p T is <3 x i12>, then <3 x i32>
  /// is returned.
  Type *getI32Ty(IRBuilder<> &B, const Type *T) const;

  /// \returns True if binary operation \p I is a signed binary operation, false
  /// otherwise.
  bool isSigned(const BinaryOperator &I) const;

  /// \returns True if the condition of 'select' operation \p I comes from a
  /// signed 'icmp' operation, false otherwise.
  bool isSigned(const SelectInst &I) const;

  /// \returns True if type \p T needs to be promoted to 32 bit integer type,
  /// false otherwise.
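  /// For example, i7 and i16 need promotion (as do their vector forms on
  /// targets without VOP3P instructions), while i1 and i32 do not.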
  bool needsPromotionToI32(const Type *T) const;

  /// Promotes uniform binary operation \p I to equivalent 32 bit binary
  /// operation.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by sign or zero extending operands
  /// to 32 bits, replacing \p I with the equivalent 32 bit binary operation,
  /// and truncating the result of the 32 bit binary operation back to \p I's
  /// original type. Division operations are not promoted.
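  /// For example, a uniform 'add i16 %a, %b' is promoted by zero extending
  /// both operands to i32, emitting an 'add i32', and truncating the result
  /// back to i16.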
  ///
  /// \returns True if \p I is promoted to equivalent 32 bit binary operation,
  /// false otherwise.
  bool promoteUniformOpToI32(BinaryOperator &I) const;

  /// Promotes uniform 'icmp' operation \p I to 32 bit 'icmp' operation.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by sign or zero extending operands
  /// to 32 bits, and replacing \p I with a 32 bit 'icmp' operation.
  ///
  /// \returns True.
  bool promoteUniformOpToI32(ICmpInst &I) const;

  /// Promotes uniform 'select' operation \p I to 32 bit 'select'
  /// operation.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by sign or zero extending operands
  /// to 32 bits, replacing \p I with a 32 bit 'select' operation, and
  /// truncating the result of the 32 bit 'select' operation back to \p I's
  /// original type.
  ///
  /// \returns True.
  bool promoteUniformOpToI32(SelectInst &I) const;

  /// Promotes uniform 'bitreverse' intrinsic \p I to 32 bit 'bitreverse'
  /// intrinsic.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by zero extending the operand to
  /// 32 bits, replacing \p I with a 32 bit 'bitreverse' intrinsic, shifting
  /// the result of the 32 bit 'bitreverse' intrinsic to the right with zero
  /// fill (the shift amount is 32 minus \p I's base element bit width), and
  /// truncating the result of the shift operation back to \p I's original
  /// type.
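  /// For example, an i16 'bitreverse' becomes a 'zext' to i32, a 32 bit
  /// 'bitreverse', a 'lshr' by 16, and a 'trunc' back to i16.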
  ///
  /// \returns True.
  bool promoteUniformBitreverseToI32(IntrinsicInst &I) const;

  unsigned numBitsUnsigned(Value *Op, unsigned ScalarSize) const;
  unsigned numBitsSigned(Value *Op, unsigned ScalarSize) const;
  bool isI24(Value *V, unsigned ScalarSize) const;
  bool isU24(Value *V, unsigned ScalarSize) const;

  /// Replace mul instructions with llvm.amdgcn.mul.u24 or llvm.amdgcn.mul.i24.
  /// SelectionDAG has an issue where an 'and' asserting which bits are known
  /// can be lost, so form the intrinsics here while the known-bits information
  /// is still available.
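  /// For example, a divergent 'mul i32 %a, %b' whose operands are known to fit
  /// in 24 bits becomes 'call i32 @llvm.amdgcn.mul.u24(i32 %a, i32 %b)'.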
  bool replaceMulWithMul24(BinaryOperator &I) const;

  /// Perform the same fold as the equivalently named function in DAGCombiner.
  /// Since we expand some divisions here, we need to perform this fold before
  /// the expansion obscures the select.
  bool foldBinOpIntoSelect(BinaryOperator &I) const;

  bool divHasSpecialOptimization(BinaryOperator &I,
                                 Value *Num, Value *Den) const;
  int getDivNumBits(BinaryOperator &I,
                    Value *Num, Value *Den,
                    unsigned AtLeast, bool Signed) const;

  /// Expands 24 bit div or rem.
  Value* expandDivRem24(IRBuilder<> &Builder, BinaryOperator &I,
                        Value *Num, Value *Den,
                        bool IsDiv, bool IsSigned) const;

  Value *expandDivRem24Impl(IRBuilder<> &Builder, BinaryOperator &I,
                            Value *Num, Value *Den, unsigned NumBits,
                            bool IsDiv, bool IsSigned) const;

  /// Expands 32 bit div or rem.
  Value* expandDivRem32(IRBuilder<> &Builder, BinaryOperator &I,
                        Value *Num, Value *Den) const;

  Value *shrinkDivRem64(IRBuilder<> &Builder, BinaryOperator &I,
                        Value *Num, Value *Den) const;
  void expandDivRem64(BinaryOperator &I) const;

  /// Widen a scalar load.
  ///
  /// \details Widen a uniform, small-type load from constant memory to a full
  /// 32 bits and then truncate the result, allowing a scalar load to be used
  /// instead of a vector load.
  ///
  /// \returns True if the load can be widened.
  bool canWidenScalarExtLoad(LoadInst &I) const;

public:
  static char ID;

  AMDGPUCodeGenPrepare() : FunctionPass(ID) {}

  bool visitFDiv(BinaryOperator &I);
  bool visitXor(BinaryOperator &I);

  bool visitInstruction(Instruction &I) { return false; }
  bool visitBinaryOperator(BinaryOperator &I);
  bool visitLoadInst(LoadInst &I);
  bool visitICmpInst(ICmpInst &I);
  bool visitSelectInst(SelectInst &I);

  bool visitIntrinsicInst(IntrinsicInst &I);
  bool visitBitreverseIntrinsicInst(IntrinsicInst &I);

  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "AMDGPU IR optimizations"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<LegacyDivergenceAnalysis>();

    // FIXME: Division expansion needs to preserve the dominator tree.
    if (!ExpandDiv64InIR)
      AU.setPreservesAll();
  }
};

} // end anonymous namespace

unsigned AMDGPUCodeGenPrepare::getBaseElementBitWidth(const Type *T) const {
  assert(needsPromotionToI32(T) && "T does not need promotion to i32");

  if (T->isIntegerTy())
    return T->getIntegerBitWidth();
  return cast<VectorType>(T)->getElementType()->getIntegerBitWidth();
}

Type *AMDGPUCodeGenPrepare::getI32Ty(IRBuilder<> &B, const Type *T) const {
  assert(needsPromotionToI32(T) && "T does not need promotion to i32");

  if (T->isIntegerTy())
    return B.getInt32Ty();
  return FixedVectorType::get(B.getInt32Ty(), cast<FixedVectorType>(T));
}

bool AMDGPUCodeGenPrepare::isSigned(const BinaryOperator &I) const {
  return I.getOpcode() == Instruction::AShr ||
      I.getOpcode() == Instruction::SDiv || I.getOpcode() == Instruction::SRem;
}

bool AMDGPUCodeGenPrepare::isSigned(const SelectInst &I) const {
  return isa<ICmpInst>(I.getOperand(0)) ?
      cast<ICmpInst>(I.getOperand(0))->isSigned() : false;
}

bool AMDGPUCodeGenPrepare::needsPromotionToI32(const Type *T) const {
  if (!Widen16BitOps)
    return false;

  const IntegerType *IntTy = dyn_cast<IntegerType>(T);
  if (IntTy && IntTy->getBitWidth() > 1 && IntTy->getBitWidth() <= 16)
    return true;

  if (const VectorType *VT = dyn_cast<VectorType>(T)) {
    // TODO: The set of packed operations is more limited, so may want to
    // promote some anyway.
    if (ST->hasVOP3PInsts())
      return false;

    return needsPromotionToI32(VT->getElementType());
  }

  return false;
}

// Return true if the op promoted to i32 should have nsw set.
static bool promotedOpIsNSW(const Instruction &I) {
  switch (I.getOpcode()) {
  case Instruction::Shl:
  case Instruction::Add:
  case Instruction::Sub:
    return true;
  case Instruction::Mul:
    return I.hasNoUnsignedWrap();
  default:
    return false;
  }
}

// Return true if the op promoted to i32 should have nuw set.
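//
// A sketch of why 'add' qualifies in both helpers: its operands are
// zero-extended during promotion (isSigned() is false for 'add'), so each
// fits in 16 bits and the 32-bit sum fits in 17 bits, wrapping neither
// signed nor unsigned.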
static bool promotedOpIsNUW(const Instruction &I) {
  switch (I.getOpcode()) {
  case Instruction::Shl:
  case Instruction::Add:
  case Instruction::Mul:
    return true;
  case Instruction::Sub:
    return I.hasNoUnsignedWrap();
  default:
    return false;
  }
}

bool AMDGPUCodeGenPrepare::canWidenScalarExtLoad(LoadInst &I) const {
  Type *Ty = I.getType();
  const DataLayout &DL = Mod->getDataLayout();
  int TySize = DL.getTypeSizeInBits(Ty);
  Align Alignment = DL.getValueOrABITypeAlignment(I.getAlign(), Ty);

  return I.isSimple() && TySize < 32 && Alignment >= 4 && DA->isUniform(&I);
}

bool AMDGPUCodeGenPrepare::promoteUniformOpToI32(BinaryOperator &I) const {
  assert(needsPromotionToI32(I.getType()) &&
         "I does not need promotion to i32");

  if (I.getOpcode() == Instruction::SDiv ||
      I.getOpcode() == Instruction::UDiv ||
      I.getOpcode() == Instruction::SRem ||
      I.getOpcode() == Instruction::URem)
    return false;

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getType());
  Value *ExtOp0 = nullptr;
  Value *ExtOp1 = nullptr;
  Value *ExtRes = nullptr;
  Value *TruncRes = nullptr;

  if (isSigned(I)) {
    ExtOp0 = Builder.CreateSExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
  } else {
    ExtOp0 = Builder.CreateZExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
  }

  ExtRes = Builder.CreateBinOp(I.getOpcode(), ExtOp0, ExtOp1);
  if (Instruction *Inst = dyn_cast<Instruction>(ExtRes)) {
    if (promotedOpIsNSW(cast<Instruction>(I)))
      Inst->setHasNoSignedWrap();

    if (promotedOpIsNUW(cast<Instruction>(I)))
      Inst->setHasNoUnsignedWrap();

    if (const auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I))
      Inst->setIsExact(ExactOp->isExact());
  }

  TruncRes = Builder.CreateTrunc(ExtRes, I.getType());

  I.replaceAllUsesWith(TruncRes);
  I.eraseFromParent();

  return true;
}

bool AMDGPUCodeGenPrepare::promoteUniformOpToI32(ICmpInst &I) const {
  assert(needsPromotionToI32(I.getOperand(0)->getType()) &&
         "I does not need promotion to i32");

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getOperand(0)->getType());
  Value *ExtOp0 = nullptr;
  Value *ExtOp1 = nullptr;
  Value *NewICmp = nullptr;

  if (I.isSigned()) {
    ExtOp0 = Builder.CreateSExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
  } else {
    ExtOp0 = Builder.CreateZExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
  }
  NewICmp = Builder.CreateICmp(I.getPredicate(), ExtOp0, ExtOp1);

  I.replaceAllUsesWith(NewICmp);
  I.eraseFromParent();

  return true;
}

bool AMDGPUCodeGenPrepare::promoteUniformOpToI32(SelectInst &I) const {
  assert(needsPromotionToI32(I.getType()) &&
         "I does not need promotion to i32");

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getType());
  Value *ExtOp1 = nullptr;
  Value *ExtOp2 = nullptr;
  Value *ExtRes = nullptr;
  Value *TruncRes = nullptr;

  if (isSigned(I)) {
    ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
    ExtOp2 = Builder.CreateSExt(I.getOperand(2), I32Ty);
  } else {
    ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
    ExtOp2 = Builder.CreateZExt(I.getOperand(2), I32Ty);
  }
  ExtRes = Builder.CreateSelect(I.getOperand(0), ExtOp1, ExtOp2);
  TruncRes = Builder.CreateTrunc(ExtRes, I.getType());

  I.replaceAllUsesWith(TruncRes);
  I.eraseFromParent();

  return true;
}

bool AMDGPUCodeGenPrepare::promoteUniformBitreverseToI32(
    IntrinsicInst &I) const {
  assert(I.getIntrinsicID() == Intrinsic::bitreverse &&
         "I must be bitreverse intrinsic");
  assert(needsPromotionToI32(I.getType()) &&
         "I does not need promotion to i32");

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getType());
  Function *I32 =
      Intrinsic::getDeclaration(Mod, Intrinsic::bitreverse, { I32Ty });
  Value *ExtOp = Builder.CreateZExt(I.getOperand(0), I32Ty);
  Value *ExtRes = Builder.CreateCall(I32, { ExtOp });
  Value *LShrOp =
      Builder.CreateLShr(ExtRes, 32 - getBaseElementBitWidth(I.getType()));
  Value *TruncRes =
      Builder.CreateTrunc(LShrOp, I.getType());

  I.replaceAllUsesWith(TruncRes);
  I.eraseFromParent();

  return true;
}

unsigned AMDGPUCodeGenPrepare::numBitsUnsigned(Value *Op,
                                               unsigned ScalarSize) const {
  KnownBits Known = computeKnownBits(Op, *DL, 0, AC);
  return ScalarSize - Known.countMinLeadingZeros();
}

unsigned AMDGPUCodeGenPrepare::numBitsSigned(Value *Op,
                                             unsigned ScalarSize) const {
  // In order for this to be a signed 24-bit value, bit 23 must be a sign bit.
  return ScalarSize - ComputeNumSignBits(Op, *DL, 0, AC);
}

bool AMDGPUCodeGenPrepare::isI24(Value *V, unsigned ScalarSize) const {
  // Types less than 24 bits wide should be treated as unsigned 24-bit values.
  return ScalarSize >= 24 && numBitsSigned(V, ScalarSize) < 24;
}

bool AMDGPUCodeGenPrepare::isU24(Value *V, unsigned ScalarSize) const {
  return numBitsUnsigned(V, ScalarSize) <= 24;
}

static void extractValues(IRBuilder<> &Builder,
                          SmallVectorImpl<Value *> &Values, Value *V) {
  auto *VT = dyn_cast<FixedVectorType>(V->getType());
  if (!VT) {
    Values.push_back(V);
    return;
  }

  for (int I = 0, E = VT->getNumElements(); I != E; ++I)
    Values.push_back(Builder.CreateExtractElement(V, I));
}

static Value *insertValues(IRBuilder<> &Builder,
                           Type *Ty,
                           SmallVectorImpl<Value *> &Values) {
  if (Values.size() == 1)
    return Values[0];

  Value *NewVal = UndefValue::get(Ty);
  for (int I = 0, E = Values.size(); I != E; ++I)
    NewVal = Builder.CreateInsertElement(NewVal, Values[I], I);

  return NewVal;
}

bool AMDGPUCodeGenPrepare::replaceMulWithMul24(BinaryOperator &I) const {
  if (I.getOpcode() != Instruction::Mul)
    return false;

  Type *Ty = I.getType();
  unsigned Size = Ty->getScalarSizeInBits();
  if (Size <= 16 && ST->has16BitInsts())
    return false;

  // Prefer scalar if this could be s_mul_i32
  if (DA->isUniform(&I))
    return false;

  Value *LHS = I.getOperand(0);
  Value *RHS = I.getOperand(1);
  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Intrinsic::ID IntrID = Intrinsic::not_intrinsic;

  if (ST->hasMulU24() && isU24(LHS, Size) && isU24(RHS, Size)) {
    // The mul24 instruction yields only the low-order 32 bits of the product.
    // If the destination is wider than 32 bits and the full product may need
    // more than 32 bits, mul24 would truncate the result.
    if (Size > 32 &&
        numBitsUnsigned(LHS, Size) + numBitsUnsigned(RHS, Size) > 32) {
      return false;
    }

    IntrID = Intrinsic::amdgcn_mul_u24;
  } else if (ST->hasMulI24() && isI24(LHS, Size) && isI24(RHS, Size)) {
    // If the destination is wider than 32 bits, the original result may be
    // positive even though its highest set bit is bit 31. Generating mul24
    // and sign-extending the 32-bit result would then yield a negative value.
    if (Size > 32 && numBitsSigned(LHS, Size) + numBitsSigned(RHS, Size) > 30) {
      return false;
    }

    IntrID = Intrinsic::amdgcn_mul_i24;
  } else
    return false;

  SmallVector<Value *, 4> LHSVals;
  SmallVector<Value *, 4> RHSVals;
  SmallVector<Value *, 4> ResultVals;
  extractValues(Builder, LHSVals, LHS);
  extractValues(Builder, RHSVals, RHS);

  IntegerType *I32Ty = Builder.getInt32Ty();
  FunctionCallee Intrin = Intrinsic::getDeclaration(Mod, IntrID);
  for (int I = 0, E = LHSVals.size(); I != E; ++I) {
    Value *LHS, *RHS;
    if (IntrID == Intrinsic::amdgcn_mul_u24) {
      LHS = Builder.CreateZExtOrTrunc(LHSVals[I], I32Ty);
      RHS = Builder.CreateZExtOrTrunc(RHSVals[I], I32Ty);
    } else {
      LHS = Builder.CreateSExtOrTrunc(LHSVals[I], I32Ty);
      RHS = Builder.CreateSExtOrTrunc(RHSVals[I], I32Ty);
    }

    Value *Result = Builder.CreateCall(Intrin, {LHS, RHS});

    if (IntrID == Intrinsic::amdgcn_mul_u24) {
      ResultVals.push_back(Builder.CreateZExtOrTrunc(Result,
                                                     LHSVals[I]->getType()));
    } else {
      ResultVals.push_back(Builder.CreateSExtOrTrunc(Result,
                                                     LHSVals[I]->getType()));
    }
  }

  Value *NewVal = insertValues(Builder, Ty, ResultVals);
  NewVal->takeName(&I);
  I.replaceAllUsesWith(NewVal);
  I.eraseFromParent();

  return true;
}

// Find a select instruction, which may have been cast. This mostly deals with
// cases where i16 selects were promoted to i32 here.
static SelectInst *findSelectThroughCast(Value *V, CastInst *&Cast) {
  Cast = nullptr;
  if (SelectInst *Sel = dyn_cast<SelectInst>(V))
    return Sel;

  if ((Cast = dyn_cast<CastInst>(V))) {
    if (SelectInst *Sel = dyn_cast<SelectInst>(Cast->getOperand(0)))
      return Sel;
  }

  return nullptr;
}

bool AMDGPUCodeGenPrepare::foldBinOpIntoSelect(BinaryOperator &BO) const {
  // Don't do this unless the old select is going away. We want to eliminate the
  // binary operator, not replace a binop with a select.
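  //
  // For example (a sketch; the select arms and the binop's other operand must
  // all fold to constants):
  //   %s = select i1 %c, i32 16, i32 8
  //   %d = udiv i32 32, %s
  // becomes:
  //   %d = select i1 %c, i32 2, i32 4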
  int SelOpNo = 0;

  CastInst *CastOp;

  // TODO: Should probably try to handle some cases with multiple
  // users. Duplicating the select may be profitable for division.
  SelectInst *Sel = findSelectThroughCast(BO.getOperand(0), CastOp);
  if (!Sel || !Sel->hasOneUse()) {
    SelOpNo = 1;
    Sel = findSelectThroughCast(BO.getOperand(1), CastOp);
  }

  if (!Sel || !Sel->hasOneUse())
    return false;

  Constant *CT = dyn_cast<Constant>(Sel->getTrueValue());
  Constant *CF = dyn_cast<Constant>(Sel->getFalseValue());
  Constant *CBO = dyn_cast<Constant>(BO.getOperand(SelOpNo ^ 1));
  if (!CBO || !CT || !CF)
    return false;

  if (CastOp) {
    if (!CastOp->hasOneUse())
      return false;
    CT = ConstantFoldCastOperand(CastOp->getOpcode(), CT, BO.getType(), *DL);
    CF = ConstantFoldCastOperand(CastOp->getOpcode(), CF, BO.getType(), *DL);
  }

  // TODO: Handle the special 0/-1 cases the DAG combine does, although we only
  // really need to handle divisions here.
  Constant *FoldedT = SelOpNo ?
    ConstantFoldBinaryOpOperands(BO.getOpcode(), CBO, CT, *DL) :
    ConstantFoldBinaryOpOperands(BO.getOpcode(), CT, CBO, *DL);
  if (!FoldedT || isa<ConstantExpr>(FoldedT))
    return false;

  Constant *FoldedF = SelOpNo ?
    ConstantFoldBinaryOpOperands(BO.getOpcode(), CBO, CF, *DL) :
    ConstantFoldBinaryOpOperands(BO.getOpcode(), CF, CBO, *DL);
  if (!FoldedF || isa<ConstantExpr>(FoldedF))
    return false;

  IRBuilder<> Builder(&BO);
  Builder.SetCurrentDebugLocation(BO.getDebugLoc());
  if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(&BO))
    Builder.setFastMathFlags(FPOp->getFastMathFlags());

  Value *NewSelect = Builder.CreateSelect(Sel->getCondition(),
                                          FoldedT, FoldedF);
  NewSelect->takeName(&BO);
  BO.replaceAllUsesWith(NewSelect);
  BO.eraseFromParent();
  if (CastOp)
    CastOp->eraseFromParent();
  Sel->eraseFromParent();
  return true;
}

// Optimize fdiv with rcp:
//
// 1/x -> rcp(x) when rcp is sufficiently accurate, or when inaccurate rcp is
//               allowed with unsafe-fp-math or afn.
//
// a/b -> a*rcp(b) when inaccurate rcp is allowed with unsafe-fp-math or afn.
static Value *optimizeWithRcp(Value *Num, Value *Den, bool AllowInaccurateRcp,
                              bool RcpIsAccurate, IRBuilder<> &Builder,
                              Module *Mod) {
  if (!AllowInaccurateRcp && !RcpIsAccurate)
    return nullptr;

  Type *Ty = Den->getType();
  if (const ConstantFP *CLHS = dyn_cast<ConstantFP>(Num)) {
    if (AllowInaccurateRcp || RcpIsAccurate) {
      if (CLHS->isExactlyValue(1.0)) {
        Function *Decl = Intrinsic::getDeclaration(
          Mod, Intrinsic::amdgcn_rcp, Ty);

        // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
        // the CI documentation they have a worst case error of 1 ulp.
        // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to
        // use them as long as we aren't trying to use denormals.
        //
        // v_rcp_f16 and v_rsq_f16 DO support denormals.

        // NOTE: v_sqrt and v_rcp will be combined to v_rsq later. So we don't
        //       insert rsq intrinsic here.

        // 1.0 / x -> rcp(x)
        return Builder.CreateCall(Decl, { Den });
      }

      // Same as for 1.0, but expand the sign out of the constant.
      if (CLHS->isExactlyValue(-1.0)) {
        Function *Decl = Intrinsic::getDeclaration(
          Mod, Intrinsic::amdgcn_rcp, Ty);

        // -1.0 / x -> rcp (fneg x)
        Value *FNeg = Builder.CreateFNeg(Den);
        return Builder.CreateCall(Decl, { FNeg });
      }
    }
  }

  if (AllowInaccurateRcp) {
    Function *Decl = Intrinsic::getDeclaration(
      Mod, Intrinsic::amdgcn_rcp, Ty);

    // Turn into multiply by the reciprocal.
    // x / y -> x * (1.0 / y)
    Value *Recip = Builder.CreateCall(Decl, { Den });
    return Builder.CreateFMul(Num, Recip);
  }
  return nullptr;
}

// Optimize with fdiv.fast:
//
// a/b -> fdiv.fast(a, b) when !fpmath >= 2.5ulp with denormals flushed.
//
// 1/x -> fdiv.fast(1,x)  when !fpmath >= 2.5ulp.
//
// NOTE: optimizeWithRcp should be tried first because rcp is preferred.
static Value *optimizeWithFDivFast(Value *Num, Value *Den, float ReqdAccuracy,
                                   bool HasDenormals, IRBuilder<> &Builder,
                                   Module *Mod) {
  // fdiv.fast can achieve 2.5 ULP accuracy.
  if (ReqdAccuracy < 2.5f)
    return nullptr;

  // Only have fdiv.fast for f32.
  Type *Ty = Den->getType();
  if (!Ty->isFloatTy())
    return nullptr;

  bool NumIsOne = false;
  if (const ConstantFP *CNum = dyn_cast<ConstantFP>(Num)) {
    if (CNum->isExactlyValue(+1.0) || CNum->isExactlyValue(-1.0))
      NumIsOne = true;
  }

  // fdiv.fast does not support denormals, but it is always fine to use for
  // 1.0/x.
  if (HasDenormals && !NumIsOne)
    return nullptr;

  Function *Decl = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_fdiv_fast);
  return Builder.CreateCall(Decl, { Num, Den });
}

// Optimization is performed based on fpmath, fast math flags, and denormal
// handling, lowering fdiv with either rcp or fdiv.fast.
//
// With rcp:
//   1/x -> rcp(x) when rcp is sufficiently accurate, or when inaccurate rcp
//                 is allowed with unsafe-fp-math or afn.
//
//   a/b -> a*rcp(b) when inaccurate rcp is allowed with unsafe-fp-math or afn.
//
// With fdiv.fast:
//   a/b -> fdiv.fast(a, b) when !fpmath >= 2.5ulp with denormals flushed.
//
//   1/x -> fdiv.fast(1,x)  when !fpmath >= 2.5ulp.
//
// NOTE: rcp is preferred in cases where both are legal.
bool AMDGPUCodeGenPrepare::visitFDiv(BinaryOperator &FDiv) {
  Type *Ty = FDiv.getType()->getScalarType();

  // The f64 rcp/rsq approximations are pretty inaccurate. We can do an
  // expansion around them in codegen.
  if (Ty->isDoubleTy())
    return false;

  // No intrinsic for fdiv16 if target does not support f16.
  if (Ty->isHalfTy() && !ST->has16BitInsts())
    return false;

  const FPMathOperator *FPOp = cast<const FPMathOperator>(&FDiv);
  const float ReqdAccuracy = FPOp->getFPAccuracy();

  // Inaccurate rcp is allowed with unsafe-fp-math or afn.
  FastMathFlags FMF = FPOp->getFastMathFlags();
  const bool AllowInaccurateRcp = HasUnsafeFPMath || FMF.approxFunc();

  // rcp_f16 is accurate for !fpmath >= 1.0ulp.
  // rcp_f32 is accurate for !fpmath >= 1.0ulp and denormals are flushed.
  // rcp_f64 is never accurate.
  const bool RcpIsAccurate = (Ty->isHalfTy() && ReqdAccuracy >= 1.0f) ||
            (Ty->isFloatTy() && !HasFP32Denormals && ReqdAccuracy >= 1.0f);

  IRBuilder<> Builder(FDiv.getParent(), std::next(FDiv.getIterator()));
  Builder.setFastMathFlags(FMF);
  Builder.SetCurrentDebugLocation(FDiv.getDebugLoc());

  Value *Num = FDiv.getOperand(0);
  Value *Den = FDiv.getOperand(1);

  Value *NewFDiv = nullptr;
  if (auto *VT = dyn_cast<FixedVectorType>(FDiv.getType())) {
    NewFDiv = UndefValue::get(VT);

    // FIXME: Doesn't do the right thing for cases where the vector is partially
    // constant. This works when the scalarizer pass is run first.
    for (unsigned I = 0, E = VT->getNumElements(); I != E; ++I) {
      Value *NumEltI = Builder.CreateExtractElement(Num, I);
      Value *DenEltI = Builder.CreateExtractElement(Den, I);
      // Try rcp first.
      Value *NewElt = optimizeWithRcp(NumEltI, DenEltI, AllowInaccurateRcp,
                                      RcpIsAccurate, Builder, Mod);
      if (!NewElt) // Try fdiv.fast.
        NewElt = optimizeWithFDivFast(NumEltI, DenEltI, ReqdAccuracy,
                                      HasFP32Denormals, Builder, Mod);
      if (!NewElt) // Keep the original.
        NewElt = Builder.CreateFDiv(NumEltI, DenEltI);

      NewFDiv = Builder.CreateInsertElement(NewFDiv, NewElt, I);
    }
  } else { // Scalar FDiv.
    // Try rcp first.
    NewFDiv = optimizeWithRcp(Num, Den, AllowInaccurateRcp, RcpIsAccurate,
                              Builder, Mod);
    if (!NewFDiv) { // Try fdiv.fast.
      NewFDiv = optimizeWithFDivFast(Num, Den, ReqdAccuracy, HasFP32Denormals,
                                     Builder, Mod);
    }
  }

  if (NewFDiv) {
    FDiv.replaceAllUsesWith(NewFDiv);
    NewFDiv->takeName(&FDiv);
    FDiv.eraseFromParent();
  }

  return !!NewFDiv;
}

bool AMDGPUCodeGenPrepare::visitXor(BinaryOperator &I) {
  // Match the Xor instruction, its type and its operands
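  //
  // For example, a sketch of the fold this implements:
  //   %c = call i1 @llvm.amdgcn.class.f32(float %x, i32 3)
  //   %n = xor i1 %c, true
  // becomes a single call with the ten-bit test mask inverted:
  //   %n = call i1 @llvm.amdgcn.class.f32(float %x, i32 1020)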
  IntrinsicInst *IntrinsicCall = dyn_cast<IntrinsicInst>(I.getOperand(0));
  ConstantInt *RHS = dyn_cast<ConstantInt>(I.getOperand(1));
  if (!RHS || !IntrinsicCall || RHS->getSExtValue() != -1)
    return visitBinaryOperator(I);

  // Check that the call is to the amdgcn_class intrinsic and has only one use.
  if (IntrinsicCall->getIntrinsicID() != Intrinsic::amdgcn_class ||
      !IntrinsicCall->hasOneUse())
    return visitBinaryOperator(I);

839   // "Not" the second argument of the intrinsic call
  ConstantInt *Arg = dyn_cast<ConstantInt>(IntrinsicCall->getOperand(1));
  if (!Arg)
    return visitBinaryOperator(I);

  IntrinsicCall->setOperand(
      1, ConstantInt::get(Arg->getType(), Arg->getZExtValue() ^ 0x3ff));
  I.replaceAllUsesWith(IntrinsicCall);
  I.eraseFromParent();
  return true;
}

static bool hasUnsafeFPMath(const Function &F) {
  Attribute Attr = F.getFnAttribute("unsafe-fp-math");
  return Attr.getValueAsBool();
}

static std::pair<Value*, Value*> getMul64(IRBuilder<> &Builder,
                                          Value *LHS, Value *RHS) {
  Type *I32Ty = Builder.getInt32Ty();
  Type *I64Ty = Builder.getInt64Ty();

  Value *LHS_EXT64 = Builder.CreateZExt(LHS, I64Ty);
  Value *RHS_EXT64 = Builder.CreateZExt(RHS, I64Ty);
  Value *MUL64 = Builder.CreateMul(LHS_EXT64, RHS_EXT64);
  Value *Lo = Builder.CreateTrunc(MUL64, I32Ty);
  Value *Hi = Builder.CreateLShr(MUL64, Builder.getInt64(32));
  Hi = Builder.CreateTrunc(Hi, I32Ty);
  return std::make_pair(Lo, Hi);
}

static Value* getMulHu(IRBuilder<> &Builder, Value *LHS, Value *RHS) {
  return getMul64(Builder, LHS, RHS).second;
}

/// Figure out how many bits are really needed for this division. \p AtLeast is
/// an optimization hint to bypass the second ComputeNumSignBits call if the
/// first one is insufficient. Returns -1 on failure.
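/// For example, for the 24-bit expansion of an i32 udiv, \p AtLeast is 9: both
/// operands must then have at least 9 leading zero bits (at most 23
/// significant bits), so the result is at most 23, or 24 for a signed divide.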
int AMDGPUCodeGenPrepare::getDivNumBits(BinaryOperator &I,
                                        Value *Num, Value *Den,
                                        unsigned AtLeast, bool IsSigned) const {
  const DataLayout &DL = Mod->getDataLayout();
  unsigned LHSSignBits = ComputeNumSignBits(Num, DL, 0, AC, &I);
  if (LHSSignBits < AtLeast)
    return -1;

  unsigned RHSSignBits = ComputeNumSignBits(Den, DL, 0, AC, &I);
  if (RHSSignBits < AtLeast)
    return -1;

  unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
  unsigned DivBits = Num->getType()->getScalarSizeInBits() - SignBits;
  if (IsSigned)
    ++DivBits;
  return DivBits;
}

// The fractional part of a float is enough to accurately represent up to
// a 24-bit signed integer.
Value *AMDGPUCodeGenPrepare::expandDivRem24(IRBuilder<> &Builder,
                                            BinaryOperator &I,
                                            Value *Num, Value *Den,
                                            bool IsDiv, bool IsSigned) const {
  int DivBits = getDivNumBits(I, Num, Den, 9, IsSigned);
  if (DivBits == -1)
    return nullptr;
  return expandDivRem24Impl(Builder, I, Num, Den, DivBits, IsDiv, IsSigned);
}

Value *AMDGPUCodeGenPrepare::expandDivRem24Impl(IRBuilder<> &Builder,
                                                BinaryOperator &I,
                                                Value *Num, Value *Den,
                                                unsigned DivBits,
                                                bool IsDiv, bool IsSigned) const {
  Type *I32Ty = Builder.getInt32Ty();
  Num = Builder.CreateTrunc(Num, I32Ty);
  Den = Builder.CreateTrunc(Den, I32Ty);

  Type *F32Ty = Builder.getFloatTy();
  ConstantInt *One = Builder.getInt32(1);
  Value *JQ = One;

  if (IsSigned) {
    // char|short jq = ia ^ ib;
    JQ = Builder.CreateXor(Num, Den);

    // jq = jq >> (bitsize - 2)
    JQ = Builder.CreateAShr(JQ, Builder.getInt32(30));

    // jq = jq | 0x1
    JQ = Builder.CreateOr(JQ, One);
  }

  // int ia = (int)LHS;
  Value *IA = Num;

  // int ib = (int)RHS;
  Value *IB = Den;

  // float fa = (float)ia;
  Value *FA = IsSigned ? Builder.CreateSIToFP(IA, F32Ty)
                       : Builder.CreateUIToFP(IA, F32Ty);

  // float fb = (float)ib;
  Value *FB = IsSigned ? Builder.CreateSIToFP(IB, F32Ty)
                       : Builder.CreateUIToFP(IB, F32Ty);

  Function *RcpDecl = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_rcp,
                                                Builder.getFloatTy());
  Value *RCP = Builder.CreateCall(RcpDecl, { FB });
  Value *FQM = Builder.CreateFMul(FA, RCP);

  // fq = trunc(fqm);
  CallInst *FQ = Builder.CreateUnaryIntrinsic(Intrinsic::trunc, FQM);
  FQ->copyFastMathFlags(Builder.getFastMathFlags());

  // float fqneg = -fq;
  Value *FQNeg = Builder.CreateFNeg(FQ);

  // float fr = mad(fqneg, fb, fa);
  auto FMAD = !ST->hasMadMacF32Insts()
                  ? Intrinsic::fma
                  : (Intrinsic::ID)Intrinsic::amdgcn_fmad_ftz;
  Value *FR = Builder.CreateIntrinsic(FMAD,
                                      {FQNeg->getType()}, {FQNeg, FB, FA}, FQ);

  // int iq = (int)fq;
  Value *IQ = IsSigned ? Builder.CreateFPToSI(FQ, I32Ty)
                       : Builder.CreateFPToUI(FQ, I32Ty);

  // fr = fabs(fr);
  FR = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, FR, FQ);

  // fb = fabs(fb);
  FB = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, FB, FQ);

  // int cv = fr >= fb;
  Value *CV = Builder.CreateFCmpOGE(FR, FB);

  // jq = (cv ? jq : 0);
  JQ = Builder.CreateSelect(CV, JQ, Builder.getInt32(0));

  // dst = iq + jq;
  Value *Div = Builder.CreateAdd(IQ, JQ);

  Value *Res = Div;
  if (!IsDiv) {
    // Rem needs compensation, it's easier to recompute it
    Value *Rem = Builder.CreateMul(Div, Den);
    Res = Builder.CreateSub(Num, Rem);
  }

  if (DivBits != 0 && DivBits < 32) {
    // Extend in register from the number of bits this divide really is.
    if (IsSigned) {
      int InRegBits = 32 - DivBits;

      Res = Builder.CreateShl(Res, InRegBits);
      Res = Builder.CreateAShr(Res, InRegBits);
    } else {
      ConstantInt *TruncMask
        = Builder.getInt32((UINT64_C(1) << DivBits) - 1);
      Res = Builder.CreateAnd(Res, TruncMask);
    }
  }

  return Res;
}

// Try to recognize special cases where the DAG will emit a special, better
// expansion than the general one we do here.
//
// TODO: It would be better to just directly handle those optimizations here.
bool AMDGPUCodeGenPrepare::divHasSpecialOptimization(
  BinaryOperator &I, Value *Num, Value *Den) const {
  if (Constant *C = dyn_cast<Constant>(Den)) {
    // Arbitrary constants get a better expansion as long as a wider mulhi is
    // legal.
    if (C->getType()->getScalarSizeInBits() <= 32)
      return true;

    // TODO: The SDiv combine checks for 'not exact' for some reason.

    // If there's no wider mulhi, there's only a better expansion for powers of
    // two.
    // TODO: Should really know for each vector element.
    if (isKnownToBeAPowerOfTwo(C, *DL, true, 0, AC, &I, DT))
      return true;

    return false;
  }

  if (BinaryOperator *BinOpDen = dyn_cast<BinaryOperator>(Den)) {
    // fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) iff c is power of 2
    if (BinOpDen->getOpcode() == Instruction::Shl &&
        isa<Constant>(BinOpDen->getOperand(0)) &&
        isKnownToBeAPowerOfTwo(BinOpDen->getOperand(0), *DL, true,
                               0, AC, &I, DT)) {
      return true;
    }
  }

  return false;
}

static Value *getSign32(Value *V, IRBuilder<> &Builder, const DataLayout *DL) {
  // Check whether the sign can be determined statically.
  KnownBits Known = computeKnownBits(V, *DL);
  if (Known.isNegative())
    return Constant::getAllOnesValue(V->getType());
  if (Known.isNonNegative())
    return Constant::getNullValue(V->getType());
  return Builder.CreateAShr(V, Builder.getInt32(31));
}

Value *AMDGPUCodeGenPrepare::expandDivRem32(IRBuilder<> &Builder,
                                            BinaryOperator &I, Value *X,
                                            Value *Y) const {
  Instruction::BinaryOps Opc = I.getOpcode();
  assert(Opc == Instruction::URem || Opc == Instruction::UDiv ||
         Opc == Instruction::SRem || Opc == Instruction::SDiv);

  FastMathFlags FMF;
  FMF.setFast();
  Builder.setFastMathFlags(FMF);

  if (divHasSpecialOptimization(I, X, Y))
    return nullptr;  // Keep it for later optimization.

  bool IsDiv = Opc == Instruction::UDiv || Opc == Instruction::SDiv;
  bool IsSigned = Opc == Instruction::SRem || Opc == Instruction::SDiv;

  Type *Ty = X->getType();
  Type *I32Ty = Builder.getInt32Ty();
  Type *F32Ty = Builder.getFloatTy();

  if (Ty->getScalarSizeInBits() < 32) {
    if (IsSigned) {
      X = Builder.CreateSExt(X, I32Ty);
      Y = Builder.CreateSExt(Y, I32Ty);
    } else {
      X = Builder.CreateZExt(X, I32Ty);
      Y = Builder.CreateZExt(Y, I32Ty);
    }
  }

  if (Value *Res = expandDivRem24(Builder, I, X, Y, IsDiv, IsSigned)) {
    return IsSigned ? Builder.CreateSExtOrTrunc(Res, Ty) :
                      Builder.CreateZExtOrTrunc(Res, Ty);
  }

  ConstantInt *Zero = Builder.getInt32(0);
  ConstantInt *One = Builder.getInt32(1);

  Value *Sign = nullptr;
  if (IsSigned) {
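    // Compute each operand's sign and take absolute values: for two's
    // complement, (x + sign) ^ sign negates x when sign is all ones and is a
    // no-op when sign is zero.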
    Value *SignX = getSign32(X, Builder, DL);
    Value *SignY = getSign32(Y, Builder, DL);
    // Remainder sign is the same as LHS
    Sign = IsDiv ? Builder.CreateXor(SignX, SignY) : SignX;

    X = Builder.CreateAdd(X, SignX);
    Y = Builder.CreateAdd(Y, SignY);

    X = Builder.CreateXor(X, SignX);
    Y = Builder.CreateXor(Y, SignY);
  }

  // The algorithm here is based on ideas from "Software Integer Division", Tom
  // Rodeheffer, August 2008.
  //
  // unsigned udiv(unsigned x, unsigned y) {
  //   // Initial estimate of inv(y). The constant is less than 2^32 to ensure
  //   // that this is a lower bound on inv(y), even if some of the calculations
  //   // round up.
  //   unsigned z = (unsigned)((4294967296.0 - 512.0) * v_rcp_f32((float)y));
  //
  //   // One round of UNR (Unsigned integer Newton-Raphson) to improve z.
  //   // Empirically this is guaranteed to give a "two-y" lower bound on
  //   // inv(y).
  //   z += umulh(z, -y * z);
  //
  //   // Quotient/remainder estimate.
  //   unsigned q = umulh(x, z);
  //   unsigned r = x - q * y;
  //
  //   // Two rounds of quotient/remainder refinement.
  //   if (r >= y) {
  //     ++q;
  //     r -= y;
  //   }
  //   if (r >= y) {
  //     ++q;
  //     r -= y;
  //   }
  //
  //   return q;
  // }

  // Initial estimate of inv(y).
  Value *FloatY = Builder.CreateUIToFP(Y, F32Ty);
  Function *Rcp = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_rcp, F32Ty);
  Value *RcpY = Builder.CreateCall(Rcp, {FloatY});
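  // 0x4F7FFFFE is (float)(2^32 - 512), matching the "4294967296.0 - 512.0"
  // constant in the algorithm sketched above.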
  Constant *Scale = ConstantFP::get(F32Ty, BitsToFloat(0x4F7FFFFE));
  Value *ScaledY = Builder.CreateFMul(RcpY, Scale);
  Value *Z = Builder.CreateFPToUI(ScaledY, I32Ty);

  // One round of UNR.
  Value *NegY = Builder.CreateSub(Zero, Y);
  Value *NegYZ = Builder.CreateMul(NegY, Z);
  Z = Builder.CreateAdd(Z, getMulHu(Builder, Z, NegYZ));

  // Quotient/remainder estimate.
  Value *Q = getMulHu(Builder, X, Z);
  Value *R = Builder.CreateSub(X, Builder.CreateMul(Q, Y));

  // First quotient/remainder refinement.
  Value *Cond = Builder.CreateICmpUGE(R, Y);
  if (IsDiv)
    Q = Builder.CreateSelect(Cond, Builder.CreateAdd(Q, One), Q);
  R = Builder.CreateSelect(Cond, Builder.CreateSub(R, Y), R);

  // Second quotient/remainder refinement.
  Cond = Builder.CreateICmpUGE(R, Y);
  Value *Res;
  if (IsDiv)
    Res = Builder.CreateSelect(Cond, Builder.CreateAdd(Q, One), Q);
  else
    Res = Builder.CreateSelect(Cond, Builder.CreateSub(R, Y), R);

  if (IsSigned) {
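    // Restore the sign: (r ^ sign) - sign negates r when sign is all ones and
    // is a no-op when sign is zero.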
    Res = Builder.CreateXor(Res, Sign);
    Res = Builder.CreateSub(Res, Sign);
  }

  Res = Builder.CreateTrunc(Res, Ty);

  return Res;
}

Value *AMDGPUCodeGenPrepare::shrinkDivRem64(IRBuilder<> &Builder,
                                            BinaryOperator &I,
                                            Value *Num, Value *Den) const {
  if (!ExpandDiv64InIR && divHasSpecialOptimization(I, Num, Den))
    return nullptr;  // Keep it for later optimization.

  Instruction::BinaryOps Opc = I.getOpcode();

  bool IsDiv = Opc == Instruction::SDiv || Opc == Instruction::UDiv;
  bool IsSigned = Opc == Instruction::SDiv || Opc == Instruction::SRem;

  int NumDivBits = getDivNumBits(I, Num, Den, 32, IsSigned);
  if (NumDivBits == -1)
    return nullptr;

  Value *Narrowed = nullptr;
  if (NumDivBits <= 24) {
    Narrowed = expandDivRem24Impl(Builder, I, Num, Den, NumDivBits,
                                  IsDiv, IsSigned);
  } else if (NumDivBits <= 32) {
    Narrowed = expandDivRem32(Builder, I, Num, Den);
  }

  if (Narrowed) {
    return IsSigned ? Builder.CreateSExt(Narrowed, Num->getType()) :
                      Builder.CreateZExt(Narrowed, Num->getType());
  }

  return nullptr;
}

void AMDGPUCodeGenPrepare::expandDivRem64(BinaryOperator &I) const {
  Instruction::BinaryOps Opc = I.getOpcode();
  // Do the general expansion.
  if (Opc == Instruction::UDiv || Opc == Instruction::SDiv) {
    expandDivisionUpTo64Bits(&I);
    return;
  }

  if (Opc == Instruction::URem || Opc == Instruction::SRem) {
    expandRemainderUpTo64Bits(&I);
    return;
  }

  llvm_unreachable("not a division");
}

bool AMDGPUCodeGenPrepare::visitBinaryOperator(BinaryOperator &I) {
  if (foldBinOpIntoSelect(I))
    return true;

  if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
      DA->isUniform(&I) && promoteUniformOpToI32(I))
    return true;

  if (UseMul24Intrin && replaceMulWithMul24(I))
    return true;

  bool Changed = false;
  Instruction::BinaryOps Opc = I.getOpcode();
  Type *Ty = I.getType();
  Value *NewDiv = nullptr;
  unsigned ScalarSize = Ty->getScalarSizeInBits();

  SmallVector<BinaryOperator *, 8> Div64ToExpand;

  if ((Opc == Instruction::URem || Opc == Instruction::UDiv ||
       Opc == Instruction::SRem || Opc == Instruction::SDiv) &&
      ScalarSize <= 64 &&
      !DisableIDivExpand) {
    Value *Num = I.getOperand(0);
    Value *Den = I.getOperand(1);
    IRBuilder<> Builder(&I);
    Builder.SetCurrentDebugLocation(I.getDebugLoc());

    if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
      NewDiv = UndefValue::get(VT);

      for (unsigned N = 0, E = VT->getNumElements(); N != E; ++N) {
        Value *NumEltN = Builder.CreateExtractElement(Num, N);
        Value *DenEltN = Builder.CreateExtractElement(Den, N);

        Value *NewElt;
        if (ScalarSize <= 32) {
          NewElt = expandDivRem32(Builder, I, NumEltN, DenEltN);
          if (!NewElt)
            NewElt = Builder.CreateBinOp(Opc, NumEltN, DenEltN);
        } else {
          // See if this 64-bit division can be shrunk to 32/24-bits before
          // producing the general expansion.
          NewElt = shrinkDivRem64(Builder, I, NumEltN, DenEltN);
          if (!NewElt) {
            // The general 64-bit expansion introduces control flow and doesn't
            // return the new value. Just insert a scalar copy and defer
            // expanding it.
            NewElt = Builder.CreateBinOp(Opc, NumEltN, DenEltN);
            Div64ToExpand.push_back(cast<BinaryOperator>(NewElt));
          }
        }

        NewDiv = Builder.CreateInsertElement(NewDiv, NewElt, N);
      }
    } else {
      if (ScalarSize <= 32)
        NewDiv = expandDivRem32(Builder, I, Num, Den);
      else {
        NewDiv = shrinkDivRem64(Builder, I, Num, Den);
        if (!NewDiv)
          Div64ToExpand.push_back(&I);
      }
    }

    if (NewDiv) {
      I.replaceAllUsesWith(NewDiv);
      I.eraseFromParent();
      Changed = true;
    }
  }

  if (ExpandDiv64InIR) {
    // TODO: We get much worse code in specially handled constant cases.
    for (BinaryOperator *Div : Div64ToExpand) {
      expandDivRem64(*Div);
      Changed = true;
    }
  }

  return Changed;
}

bool AMDGPUCodeGenPrepare::visitLoadInst(LoadInst &I) {
  if (!WidenLoads)
    return false;

  if ((I.getPointerAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
       I.getPointerAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
      canWidenScalarExtLoad(I)) {
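    // For example, a sketch of the widening performed below:
    //   %v = load i8, i8 addrspace(4)* %p, align 4
    // becomes a full 32-bit scalar load plus a truncate:
    //   %w = load i32, i32 addrspace(4)* %c, align 4  ; %c = bitcast of %p
    //   %v = trunc i32 %w to i8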
    IRBuilder<> Builder(&I);
    Builder.SetCurrentDebugLocation(I.getDebugLoc());

    Type *I32Ty = Builder.getInt32Ty();
    Type *PT = PointerType::get(I32Ty, I.getPointerAddressSpace());
    Value *BitCast = Builder.CreateBitCast(I.getPointerOperand(), PT);
    LoadInst *WidenLoad = Builder.CreateLoad(I32Ty, BitCast);
    WidenLoad->copyMetadata(I);

    // If we have range metadata, we need to convert the type, and not make
    // assumptions about the high bits.
    if (auto *Range = WidenLoad->getMetadata(LLVMContext::MD_range)) {
      ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Range->getOperand(0));

      if (Lower->isNullValue()) {
        WidenLoad->setMetadata(LLVMContext::MD_range, nullptr);
      } else {
        Metadata *LowAndHigh[] = {
          ConstantAsMetadata::get(ConstantInt::get(I32Ty, Lower->getValue().zext(32))),
          // Don't make assumptions about the high bits.
          ConstantAsMetadata::get(ConstantInt::get(I32Ty, 0))
        };

        WidenLoad->setMetadata(LLVMContext::MD_range,
                               MDNode::get(Mod->getContext(), LowAndHigh));
      }
    }

    int TySize = Mod->getDataLayout().getTypeSizeInBits(I.getType());
    Type *IntNTy = Builder.getIntNTy(TySize);
    Value *ValTrunc = Builder.CreateTrunc(WidenLoad, IntNTy);
    Value *ValOrig = Builder.CreateBitCast(ValTrunc, I.getType());
    I.replaceAllUsesWith(ValOrig);
    I.eraseFromParent();
    return true;
  }

  return false;
}

bool AMDGPUCodeGenPrepare::visitICmpInst(ICmpInst &I) {
  bool Changed = false;

  if (ST->has16BitInsts() && needsPromotionToI32(I.getOperand(0)->getType()) &&
      DA->isUniform(&I))
    Changed |= promoteUniformOpToI32(I);

  return Changed;
}

bool AMDGPUCodeGenPrepare::visitSelectInst(SelectInst &I) {
  bool Changed = false;

  if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
      DA->isUniform(&I))
    Changed |= promoteUniformOpToI32(I);

  return Changed;
}

bool AMDGPUCodeGenPrepare::visitIntrinsicInst(IntrinsicInst &I) {
  switch (I.getIntrinsicID()) {
  case Intrinsic::bitreverse:
    return visitBitreverseIntrinsicInst(I);
  default:
    return false;
  }
}

bool AMDGPUCodeGenPrepare::visitBitreverseIntrinsicInst(IntrinsicInst &I) {
  bool Changed = false;

  if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
      DA->isUniform(&I))
    Changed |= promoteUniformBitreverseToI32(I);

  return Changed;
}

bool AMDGPUCodeGenPrepare::doInitialization(Module &M) {
  Mod = &M;
  DL = &Mod->getDataLayout();
  return false;
}

bool AMDGPUCodeGenPrepare::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
  if (!TPC)
    return false;

  const AMDGPUTargetMachine &TM = TPC->getTM<AMDGPUTargetMachine>();
  ST = &TM.getSubtarget<GCNSubtarget>(F);
  AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  DA = &getAnalysis<LegacyDivergenceAnalysis>();

  auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DT = DTWP ? &DTWP->getDomTree() : nullptr;

  HasUnsafeFPMath = hasUnsafeFPMath(F);

  AMDGPU::SIModeRegisterDefaults Mode(F);
  HasFP32Denormals = Mode.allFP32Denormals();

  bool MadeChange = false;

  Function::iterator NextBB;
  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; FI = NextBB) {
    BasicBlock *BB = &*FI;
    NextBB = std::next(FI);

    BasicBlock::iterator Next;
    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; I = Next) {
      Next = std::next(I);

      MadeChange |= visit(*I);

      if (Next != E) { // Control flow changed
        BasicBlock *NextInstBB = Next->getParent();
        if (NextInstBB != BB) {
          BB = NextInstBB;
          E = BB->end();
          FE = F.end();
        }
      }
    }
  }

  return MadeChange;
}

INITIALIZE_PASS_BEGIN(AMDGPUCodeGenPrepare, DEBUG_TYPE,
                      "AMDGPU IR optimizations", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(LegacyDivergenceAnalysis)
INITIALIZE_PASS_END(AMDGPUCodeGenPrepare, DEBUG_TYPE, "AMDGPU IR optimizations",
                    false, false)

char AMDGPUCodeGenPrepare::ID = 0;

FunctionPass *llvm::createAMDGPUCodeGenPreparePass() {
  return new AMDGPUCodeGenPrepare();
}