//===-- AMDGPUCodeGenPrepare.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass does misc. AMDGPU optimizations on IR before instruction
/// selection.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUTargetMachine.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/Utils/IntegerDivision.h"

#define DEBUG_TYPE "amdgpu-codegenprepare"

using namespace llvm;

namespace {

static cl::opt<bool> WidenLoads(
  "amdgpu-codegenprepare-widen-constant-loads",
  cl::desc("Widen sub-dword constant address space loads in AMDGPUCodeGenPrepare"),
  cl::ReallyHidden,
  cl::init(false));

static cl::opt<bool> Widen16BitOps(
  "amdgpu-codegenprepare-widen-16-bit-ops",
  cl::desc("Widen uniform 16-bit instructions to 32-bit in AMDGPUCodeGenPrepare"),
  cl::ReallyHidden,
  cl::init(true));

static cl::opt<bool> UseMul24Intrin(
  "amdgpu-codegenprepare-mul24",
  cl::desc("Introduce mul24 intrinsics in AMDGPUCodeGenPrepare"),
  cl::ReallyHidden,
  cl::init(true));

// Legalize 64-bit division by using the generic IR expansion.
static cl::opt<bool> ExpandDiv64InIR(
  "amdgpu-codegenprepare-expand-div64",
  cl::desc("Expand 64-bit division in AMDGPUCodeGenPrepare"),
  cl::ReallyHidden,
  cl::init(false));

// Leave all division operations as they are. This supersedes ExpandDiv64InIR
// and is used for testing the legalizer.
static cl::opt<bool> DisableIDivExpand(
  "amdgpu-codegenprepare-disable-idiv-expansion",
  cl::desc("Prevent expanding integer division in AMDGPUCodeGenPrepare"),
  cl::ReallyHidden,
  cl::init(false));

class AMDGPUCodeGenPrepare : public FunctionPass,
                             public InstVisitor<AMDGPUCodeGenPrepare, bool> {
  const GCNSubtarget *ST = nullptr;
  AssumptionCache *AC = nullptr;
  DominatorTree *DT = nullptr;
  LegacyDivergenceAnalysis *DA = nullptr;
  Module *Mod = nullptr;
  const DataLayout *DL = nullptr;
  bool HasUnsafeFPMath = false;
  bool HasFP32Denormals = false;

  /// \returns \p T's base element bit width.
  unsigned getBaseElementBitWidth(const Type *T) const;

  /// \returns Equivalent 32 bit integer type for given type \p T. For example,
  /// if \p T is i7, then i32 is returned; if \p T is <3 x i12>, then <3 x i32>
  /// is returned.
  Type *getI32Ty(IRBuilder<> &B, const Type *T) const;

  /// \returns True if binary operation \p I is a signed binary operation,
  /// false otherwise.
  bool isSigned(const BinaryOperator &I) const;

  /// \returns True if the condition of 'select' operation \p I comes from a
  /// signed 'icmp' operation, false otherwise.
  bool isSigned(const SelectInst &I) const;

  /// \returns True if type \p T needs to be promoted to a 32 bit integer type,
  /// false otherwise.
  bool needsPromotionToI32(const Type *T) const;

  /// Promotes uniform binary operation \p I to an equivalent 32 bit binary
  /// operation.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by sign or zero extending operands
  /// to 32 bits, replacing \p I with the equivalent 32 bit binary operation,
  /// and truncating the result of the 32 bit binary operation back to \p I's
  /// original type. Division operations are not promoted.
  ///
  /// \returns True if \p I is promoted to an equivalent 32 bit binary
  /// operation, false otherwise.
  bool promoteUniformOpToI32(BinaryOperator &I) const;
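
  // Illustrative sketch of the rewrite (value names invented): a uniform
  //   %r = add i16 %a, %b
  // becomes
  //   %ext0 = zext i16 %a to i32
  //   %ext1 = zext i16 %b to i32
  //   %r32  = add nuw nsw i32 %ext0, %ext1
  //   %r    = trunc i32 %r32 to i16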

  /// Promotes uniform 'icmp' operation \p I to a 32 bit 'icmp' operation.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by sign or zero extending operands
  /// to 32 bits, and replacing \p I with a 32 bit 'icmp' operation.
  ///
  /// \returns True.
  bool promoteUniformOpToI32(ICmpInst &I) const;

  /// Promotes uniform 'select' operation \p I to a 32 bit 'select' operation.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by sign or zero extending operands
  /// to 32 bits, replacing \p I with a 32 bit 'select' operation, and
  /// truncating the result of the 32 bit 'select' operation back to \p I's
  /// original type.
  ///
  /// \returns True.
  bool promoteUniformOpToI32(SelectInst &I) const;

  /// Promotes uniform 'bitreverse' intrinsic \p I to a 32 bit 'bitreverse'
  /// intrinsic.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by zero extending the operand to
  /// 32 bits, replacing \p I with a 32 bit 'bitreverse' intrinsic, shifting
  /// the result of the 32 bit 'bitreverse' intrinsic to the right with zero
  /// fill (the shift amount is 32 minus \p I's base element bit width), and
  /// truncating the result of the shift operation back to \p I's original
  /// type.
  ///
  /// \returns True.
  bool promoteUniformBitreverseToI32(IntrinsicInst &I) const;
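
  // Illustrative sketch for i16 (value names invented):
  //   %r = call i16 @llvm.bitreverse.i16(i16 %a)
  // becomes
  //   %ext = zext i16 %a to i32
  //   %rev = call i32 @llvm.bitreverse.i32(i32 %ext)
  //   %shr = lshr i32 %rev, 16
  //   %r   = trunc i32 %shr to i16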

  unsigned numBitsUnsigned(Value *Op, unsigned ScalarSize) const;
  unsigned numBitsSigned(Value *Op, unsigned ScalarSize) const;
  bool isI24(Value *V, unsigned ScalarSize) const;
  bool isU24(Value *V, unsigned ScalarSize) const;

  /// Replace mul instructions with llvm.amdgcn.mul.u24 or llvm.amdgcn.mul.s24.
  /// Doing this in IR works around a SelectionDAG issue where an 'and'
  /// asserting that the bits are known interferes with forming mul24.
  bool replaceMulWithMul24(BinaryOperator &I) const;

  /// Perform the same fold as the equivalently named function in DAGCombiner.
  /// Since we expand some divisions here, we need to perform this before the
  /// expansion obscures the select.
  bool foldBinOpIntoSelect(BinaryOperator &I) const;

  bool divHasSpecialOptimization(BinaryOperator &I,
                                 Value *Num, Value *Den) const;
  int getDivNumBits(BinaryOperator &I,
                    Value *Num, Value *Den,
                    unsigned AtLeast, bool Signed) const;

  /// Expands 24 bit div or rem.
  Value* expandDivRem24(IRBuilder<> &Builder, BinaryOperator &I,
                        Value *Num, Value *Den,
                        bool IsDiv, bool IsSigned) const;

  Value *expandDivRem24Impl(IRBuilder<> &Builder, BinaryOperator &I,
                            Value *Num, Value *Den, unsigned NumBits,
                            bool IsDiv, bool IsSigned) const;

  /// Expands 32 bit div or rem.
  Value* expandDivRem32(IRBuilder<> &Builder, BinaryOperator &I,
                        Value *Num, Value *Den) const;

  Value *shrinkDivRem64(IRBuilder<> &Builder, BinaryOperator &I,
                        Value *Num, Value *Den) const;
  void expandDivRem64(BinaryOperator &I) const;

  /// Widen a scalar load.
  ///
  /// \details Widen a scalar load for uniform, small-type loads from constant
  /// memory to a full 32 bits, and then truncate the loaded value to allow a
  /// scalar load instead of a vector load.
  ///
  /// \returns True.
  bool canWidenScalarExtLoad(LoadInst &I) const;
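
  // Illustrative sketch of the widening this enables (names and the pointer
  // cast abbreviated/invented): a uniform
  //   %v = load i8, i8 addrspace(4)* %ptr, align 4
  // is widened to
  //   %w = load i32, i32 addrspace(4)* %ptr.i32, align 4
  //   %v = trunc i32 %w to i8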

public:
  static char ID;

  AMDGPUCodeGenPrepare() : FunctionPass(ID) {}

  bool visitFDiv(BinaryOperator &I);
  bool visitXor(BinaryOperator &I);

  bool visitInstruction(Instruction &I) { return false; }
  bool visitBinaryOperator(BinaryOperator &I);
  bool visitLoadInst(LoadInst &I);
  bool visitICmpInst(ICmpInst &I);
  bool visitSelectInst(SelectInst &I);

  bool visitIntrinsicInst(IntrinsicInst &I);
  bool visitBitreverseIntrinsicInst(IntrinsicInst &I);

  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "AMDGPU IR optimizations"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<LegacyDivergenceAnalysis>();

    // FIXME: Division expansion needs to preserve the dominator tree.
    if (!ExpandDiv64InIR)
      AU.setPreservesAll();
  }
};

} // end anonymous namespace

unsigned AMDGPUCodeGenPrepare::getBaseElementBitWidth(const Type *T) const {
  assert(needsPromotionToI32(T) && "T does not need promotion to i32");

  if (T->isIntegerTy())
    return T->getIntegerBitWidth();
  return cast<VectorType>(T)->getElementType()->getIntegerBitWidth();
}

Type *AMDGPUCodeGenPrepare::getI32Ty(IRBuilder<> &B, const Type *T) const {
  assert(needsPromotionToI32(T) && "T does not need promotion to i32");

  if (T->isIntegerTy())
    return B.getInt32Ty();
  return FixedVectorType::get(B.getInt32Ty(), cast<FixedVectorType>(T));
}

bool AMDGPUCodeGenPrepare::isSigned(const BinaryOperator &I) const {
  return I.getOpcode() == Instruction::AShr ||
         I.getOpcode() == Instruction::SDiv ||
         I.getOpcode() == Instruction::SRem;
}

bool AMDGPUCodeGenPrepare::isSigned(const SelectInst &I) const {
  return isa<ICmpInst>(I.getOperand(0)) ?
         cast<ICmpInst>(I.getOperand(0))->isSigned() : false;
}

bool AMDGPUCodeGenPrepare::needsPromotionToI32(const Type *T) const {
  if (!Widen16BitOps)
    return false;

  const IntegerType *IntTy = dyn_cast<IntegerType>(T);
  if (IntTy && IntTy->getBitWidth() > 1 && IntTy->getBitWidth() <= 16)
    return true;

  if (const VectorType *VT = dyn_cast<VectorType>(T)) {
    // TODO: The set of packed operations is more limited, so may want to
    // promote some anyway.
    if (ST->hasVOP3PInsts())
      return false;

    return needsPromotionToI32(VT->getElementType());
  }

  return false;
}

// Return true if the op promoted to i32 should have nsw set.
static bool promotedOpIsNSW(const Instruction &I) {
  switch (I.getOpcode()) {
  case Instruction::Shl:
  case Instruction::Add:
  case Instruction::Sub:
    return true;
  case Instruction::Mul:
    return I.hasNoUnsignedWrap();
  default:
    return false;
  }
}
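
// For example, i16 operands are extended to i32 values of magnitude at most
// 2^16 - 1, so an i32 add of two of them (at most 2^17 - 2) cannot wrap in
// either sense, while an i32 mul can reach nearly 2^32 and is only nsw when
// the original mul could not wrap unsigned.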

// Return true if the op promoted to i32 should have nuw set.
static bool promotedOpIsNUW(const Instruction &I) {
  switch (I.getOpcode()) {
  case Instruction::Shl:
  case Instruction::Add:
  case Instruction::Mul:
    return true;
  case Instruction::Sub:
    return I.hasNoUnsignedWrap();
  default:
    return false;
  }
}

bool AMDGPUCodeGenPrepare::canWidenScalarExtLoad(LoadInst &I) const {
  Type *Ty = I.getType();
  const DataLayout &DL = Mod->getDataLayout();
  int TySize = DL.getTypeSizeInBits(Ty);
  Align Alignment = DL.getValueOrABITypeAlignment(I.getAlign(), Ty);

  return I.isSimple() && TySize < 32 && Alignment >= 4 && DA->isUniform(&I);
}

bool AMDGPUCodeGenPrepare::promoteUniformOpToI32(BinaryOperator &I) const {
  assert(needsPromotionToI32(I.getType()) &&
         "I does not need promotion to i32");

  if (I.getOpcode() == Instruction::SDiv ||
      I.getOpcode() == Instruction::UDiv ||
      I.getOpcode() == Instruction::SRem ||
      I.getOpcode() == Instruction::URem)
    return false;

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getType());
  Value *ExtOp0 = nullptr;
  Value *ExtOp1 = nullptr;
  Value *ExtRes = nullptr;
  Value *TruncRes = nullptr;

  if (isSigned(I)) {
    ExtOp0 = Builder.CreateSExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
  } else {
    ExtOp0 = Builder.CreateZExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
  }

  ExtRes = Builder.CreateBinOp(I.getOpcode(), ExtOp0, ExtOp1);
  if (Instruction *Inst = dyn_cast<Instruction>(ExtRes)) {
    if (promotedOpIsNSW(cast<Instruction>(I)))
      Inst->setHasNoSignedWrap();

    if (promotedOpIsNUW(cast<Instruction>(I)))
      Inst->setHasNoUnsignedWrap();

    if (const auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I))
      Inst->setIsExact(ExactOp->isExact());
  }

  TruncRes = Builder.CreateTrunc(ExtRes, I.getType());

  I.replaceAllUsesWith(TruncRes);
  I.eraseFromParent();

  return true;
}

bool AMDGPUCodeGenPrepare::promoteUniformOpToI32(ICmpInst &I) const {
  assert(needsPromotionToI32(I.getOperand(0)->getType()) &&
         "I does not need promotion to i32");

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getOperand(0)->getType());
  Value *ExtOp0 = nullptr;
  Value *ExtOp1 = nullptr;
  Value *NewICmp = nullptr;

  if (I.isSigned()) {
    ExtOp0 = Builder.CreateSExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
  } else {
    ExtOp0 = Builder.CreateZExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
  }
  NewICmp = Builder.CreateICmp(I.getPredicate(), ExtOp0, ExtOp1);

  I.replaceAllUsesWith(NewICmp);
  I.eraseFromParent();

  return true;
}

bool AMDGPUCodeGenPrepare::promoteUniformOpToI32(SelectInst &I) const {
  assert(needsPromotionToI32(I.getType()) &&
         "I does not need promotion to i32");

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getType());
  Value *ExtOp1 = nullptr;
  Value *ExtOp2 = nullptr;
  Value *ExtRes = nullptr;
  Value *TruncRes = nullptr;

  if (isSigned(I)) {
    ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
    ExtOp2 = Builder.CreateSExt(I.getOperand(2), I32Ty);
  } else {
    ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
    ExtOp2 = Builder.CreateZExt(I.getOperand(2), I32Ty);
  }
  ExtRes = Builder.CreateSelect(I.getOperand(0), ExtOp1, ExtOp2);
  TruncRes = Builder.CreateTrunc(ExtRes, I.getType());

  I.replaceAllUsesWith(TruncRes);
  I.eraseFromParent();

  return true;
}

bool AMDGPUCodeGenPrepare::promoteUniformBitreverseToI32(
    IntrinsicInst &I) const {
  assert(I.getIntrinsicID() == Intrinsic::bitreverse &&
         "I must be bitreverse intrinsic");
  assert(needsPromotionToI32(I.getType()) &&
         "I does not need promotion to i32");

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getType());
  Function *I32 =
      Intrinsic::getDeclaration(Mod, Intrinsic::bitreverse, { I32Ty });
  Value *ExtOp = Builder.CreateZExt(I.getOperand(0), I32Ty);
  Value *ExtRes = Builder.CreateCall(I32, { ExtOp });
  Value *LShrOp =
      Builder.CreateLShr(ExtRes, 32 - getBaseElementBitWidth(I.getType()));
  Value *TruncRes =
      Builder.CreateTrunc(LShrOp, I.getType());

  I.replaceAllUsesWith(TruncRes);
  I.eraseFromParent();

  return true;
}

unsigned AMDGPUCodeGenPrepare::numBitsUnsigned(Value *Op,
                                               unsigned ScalarSize) const {
  KnownBits Known = computeKnownBits(Op, *DL, 0, AC);
  return ScalarSize - Known.countMinLeadingZeros();
}

unsigned AMDGPUCodeGenPrepare::numBitsSigned(Value *Op,
                                             unsigned ScalarSize) const {
  // In order for this to be a signed 24-bit value, bit 23 must be a sign bit.
  return ScalarSize - ComputeNumSignBits(Op, *DL, 0, AC);
}
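
// For example, an i32 value known to lie in [0, 255] has at least 24 known
// leading zeros and at least 24 sign bits, so both helpers report 8 bits and
// both isI24 and isU24 below hold.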

bool AMDGPUCodeGenPrepare::isI24(Value *V, unsigned ScalarSize) const {
  return ScalarSize >= 24 && // Types less than 24-bit should be treated
                             // as unsigned 24-bit values.
         numBitsSigned(V, ScalarSize) < 24;
}

bool AMDGPUCodeGenPrepare::isU24(Value *V, unsigned ScalarSize) const {
  return numBitsUnsigned(V, ScalarSize) <= 24;
}

static void extractValues(IRBuilder<> &Builder,
                          SmallVectorImpl<Value *> &Values, Value *V) {
  auto *VT = dyn_cast<FixedVectorType>(V->getType());
  if (!VT) {
    Values.push_back(V);
    return;
  }

  for (int I = 0, E = VT->getNumElements(); I != E; ++I)
    Values.push_back(Builder.CreateExtractElement(V, I));
}

static Value *insertValues(IRBuilder<> &Builder,
                           Type *Ty,
                           SmallVectorImpl<Value *> &Values) {
  if (Values.size() == 1)
    return Values[0];

  Value *NewVal = UndefValue::get(Ty);
  for (int I = 0, E = Values.size(); I != E; ++I)
    NewVal = Builder.CreateInsertElement(NewVal, Values[I], I);

  return NewVal;
}

bool AMDGPUCodeGenPrepare::replaceMulWithMul24(BinaryOperator &I) const {
  if (I.getOpcode() != Instruction::Mul)
    return false;

  Type *Ty = I.getType();
  unsigned Size = Ty->getScalarSizeInBits();
  if (Size <= 16 && ST->has16BitInsts())
    return false;

  // Prefer scalar if this could be s_mul_i32
  if (DA->isUniform(&I))
    return false;

  Value *LHS = I.getOperand(0);
  Value *RHS = I.getOperand(1);
  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Intrinsic::ID IntrID = Intrinsic::not_intrinsic;

  if (ST->hasMulU24() && isU24(LHS, Size) && isU24(RHS, Size)) {
    // The mul24 instruction yields the low-order 32 bits. If the destination
    // is wider than 32 bits and the product may need more than 32 bits, mul24
    // would truncate the result.
    if (Size > 32 &&
        numBitsUnsigned(LHS, Size) + numBitsUnsigned(RHS, Size) > 32) {
      return false;
    }

    IntrID = Intrinsic::amdgcn_mul_u24;
  } else if (ST->hasMulI24() && isI24(LHS, Size) && isI24(RHS, Size)) {
    // If the destination is wider than 32 bits, a positive product may still
    // have bit 31 set in its low 32 bits; generating mul24 and sign-extending
    // its 32-bit result would then yield a negative value.
    if (Size > 32 && numBitsSigned(LHS, Size) + numBitsSigned(RHS, Size) > 30) {
      return false;
    }

    IntrID = Intrinsic::amdgcn_mul_i24;
  } else
    return false;

  SmallVector<Value *, 4> LHSVals;
  SmallVector<Value *, 4> RHSVals;
  SmallVector<Value *, 4> ResultVals;
  extractValues(Builder, LHSVals, LHS);
  extractValues(Builder, RHSVals, RHS);

  IntegerType *I32Ty = Builder.getInt32Ty();
  FunctionCallee Intrin = Intrinsic::getDeclaration(Mod, IntrID);
  for (int I = 0, E = LHSVals.size(); I != E; ++I) {
    Value *LHS, *RHS;
    if (IntrID == Intrinsic::amdgcn_mul_u24) {
      LHS = Builder.CreateZExtOrTrunc(LHSVals[I], I32Ty);
      RHS = Builder.CreateZExtOrTrunc(RHSVals[I], I32Ty);
    } else {
      LHS = Builder.CreateSExtOrTrunc(LHSVals[I], I32Ty);
      RHS = Builder.CreateSExtOrTrunc(RHSVals[I], I32Ty);
    }

    Value *Result = Builder.CreateCall(Intrin, {LHS, RHS});

    if (IntrID == Intrinsic::amdgcn_mul_u24) {
      ResultVals.push_back(Builder.CreateZExtOrTrunc(Result,
                                                     LHSVals[I]->getType()));
    } else {
      ResultVals.push_back(Builder.CreateSExtOrTrunc(Result,
                                                     LHSVals[I]->getType()));
    }
  }

  Value *NewVal = insertValues(Builder, Ty, ResultVals);
  NewVal->takeName(&I);
  I.replaceAllUsesWith(NewVal);
  I.eraseFromParent();

  return true;
}
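
// Illustrative result of the rewrite above for a divergent i32 multiply whose
// operands are both provably less than 2^24 (value names invented):
//   %m = mul i32 %x, %y
// becomes
//   %m = call i32 @llvm.amdgcn.mul.u24(i32 %x, i32 %y)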

// Find a select instruction, which may have been cast. This is mostly to deal
// with cases where i16 selects were promoted here to i32.
static SelectInst *findSelectThroughCast(Value *V, CastInst *&Cast) {
  Cast = nullptr;
  if (SelectInst *Sel = dyn_cast<SelectInst>(V))
    return Sel;

  if ((Cast = dyn_cast<CastInst>(V))) {
    if (SelectInst *Sel = dyn_cast<SelectInst>(Cast->getOperand(0)))
      return Sel;
  }

  return nullptr;
}

bool AMDGPUCodeGenPrepare::foldBinOpIntoSelect(BinaryOperator &BO) const {
  // Don't do this unless the old select is going away. We want to eliminate
  // the binary operator, not replace a binop with a select.
  int SelOpNo = 0;

  CastInst *CastOp;

  // TODO: Should probably try to handle some cases with multiple
  // users. Duplicating the select may be profitable for division.
  SelectInst *Sel = findSelectThroughCast(BO.getOperand(0), CastOp);
  if (!Sel || !Sel->hasOneUse()) {
    SelOpNo = 1;
    Sel = findSelectThroughCast(BO.getOperand(1), CastOp);
  }

  if (!Sel || !Sel->hasOneUse())
    return false;

  Constant *CT = dyn_cast<Constant>(Sel->getTrueValue());
  Constant *CF = dyn_cast<Constant>(Sel->getFalseValue());
  Constant *CBO = dyn_cast<Constant>(BO.getOperand(SelOpNo ^ 1));
  if (!CBO || !CT || !CF)
    return false;

  if (CastOp) {
    if (!CastOp->hasOneUse())
      return false;
    CT = ConstantFoldCastOperand(CastOp->getOpcode(), CT, BO.getType(), *DL);
    CF = ConstantFoldCastOperand(CastOp->getOpcode(), CF, BO.getType(), *DL);
  }

  // TODO: Handle special 0/-1 cases DAG combine does, although we only really
  // need to handle divisions here.
  Constant *FoldedT = SelOpNo ?
    ConstantFoldBinaryOpOperands(BO.getOpcode(), CBO, CT, *DL) :
    ConstantFoldBinaryOpOperands(BO.getOpcode(), CT, CBO, *DL);
  if (isa<ConstantExpr>(FoldedT))
    return false;

  Constant *FoldedF = SelOpNo ?
    ConstantFoldBinaryOpOperands(BO.getOpcode(), CBO, CF, *DL) :
    ConstantFoldBinaryOpOperands(BO.getOpcode(), CF, CBO, *DL);
  if (isa<ConstantExpr>(FoldedF))
    return false;

  IRBuilder<> Builder(&BO);
  Builder.SetCurrentDebugLocation(BO.getDebugLoc());
  if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(&BO))
    Builder.setFastMathFlags(FPOp->getFastMathFlags());

  Value *NewSelect = Builder.CreateSelect(Sel->getCondition(),
                                          FoldedT, FoldedF);
  NewSelect->takeName(&BO);
  BO.replaceAllUsesWith(NewSelect);
  BO.eraseFromParent();
  if (CastOp)
    CastOp->eraseFromParent();
  Sel->eraseFromParent();
  return true;
}
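
// Illustrative fold performed above (constants invented): with a single-use
// select feeding a division by a constant,
//   %s = select i1 %c, i32 16, i32 4
//   %d = udiv i32 %s, 2
// becomes
//   %d = select i1 %c, i32 8, i32 2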

// Optimize fdiv with rcp:
//
// 1/x -> rcp(x) when rcp is sufficiently accurate or inaccurate rcp is
// allowed with unsafe-fp-math or afn.
//
// a/b -> a*rcp(b) when inaccurate rcp is allowed with unsafe-fp-math or afn.
static Value *optimizeWithRcp(Value *Num, Value *Den, bool AllowInaccurateRcp,
                              bool RcpIsAccurate, IRBuilder<> &Builder,
                              Module *Mod) {

  if (!AllowInaccurateRcp && !RcpIsAccurate)
    return nullptr;

  Type *Ty = Den->getType();
  if (const ConstantFP *CLHS = dyn_cast<ConstantFP>(Num)) {
    if (AllowInaccurateRcp || RcpIsAccurate) {
      if (CLHS->isExactlyValue(1.0)) {
        Function *Decl = Intrinsic::getDeclaration(
            Mod, Intrinsic::amdgcn_rcp, Ty);

        // v_rcp_f32 and v_rsq_f32 do not support denormals, and, according to
        // the CI documentation, have a worst case error of 1 ulp.
        // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to
        // use it as long as we aren't trying to use denormals.
        //
        // v_rcp_f16 and v_rsq_f16 DO support denormals.

        // NOTE: v_sqrt and v_rcp will be combined to v_rsq later. So we don't
        //       insert rsq intrinsic here.

        // 1.0 / x -> rcp(x)
        return Builder.CreateCall(Decl, { Den });
      }

      // Same as for 1.0, but expand the sign out of the constant.
      if (CLHS->isExactlyValue(-1.0)) {
        Function *Decl = Intrinsic::getDeclaration(
            Mod, Intrinsic::amdgcn_rcp, Ty);

        // -1.0 / x -> rcp (fneg x)
        Value *FNeg = Builder.CreateFNeg(Den);
        return Builder.CreateCall(Decl, { FNeg });
      }
    }
  }

  if (AllowInaccurateRcp) {
    Function *Decl = Intrinsic::getDeclaration(
        Mod, Intrinsic::amdgcn_rcp, Ty);

    // Turn into multiply by the reciprocal.
    // x / y -> x * (1.0 / y)
    Value *Recip = Builder.CreateCall(Decl, { Den });
    return Builder.CreateFMul(Num, Recip);
  }
  return nullptr;
}

// Optimize with fdiv.fast:
//
// a/b -> fdiv.fast(a, b) when !fpmath >= 2.5ulp with denormals flushed.
//
// 1/x -> fdiv.fast(1,x) when !fpmath >= 2.5ulp.
//
// NOTE: optimizeWithRcp should be tried first because rcp is the preference.
static Value *optimizeWithFDivFast(Value *Num, Value *Den, float ReqdAccuracy,
                                   bool HasDenormals, IRBuilder<> &Builder,
                                   Module *Mod) {
  // fdiv.fast can achieve 2.5 ULP accuracy.
  if (ReqdAccuracy < 2.5f)
    return nullptr;

  // Only have fdiv.fast for f32.
  Type *Ty = Den->getType();
  if (!Ty->isFloatTy())
    return nullptr;

  bool NumIsOne = false;
  if (const ConstantFP *CNum = dyn_cast<ConstantFP>(Num)) {
    if (CNum->isExactlyValue(+1.0) || CNum->isExactlyValue(-1.0))
      NumIsOne = true;
  }

  // fdiv.fast does not support denormals. But 1.0/x is always fine to use.
  if (HasDenormals && !NumIsOne)
    return nullptr;

  Function *Decl = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_fdiv_fast);
  return Builder.CreateCall(Decl, { Num, Den });
}
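
// Illustrative IR for the two rewrites above (names and metadata invented;
// applicability depends on the accuracy checks described):
//   %d = fdiv float 1.0, %x
//     -> %d = call float @llvm.amdgcn.rcp.f32(float %x)
//   %d = fdiv float %a, %b, !fpmath !0      ; !0 = !{float 2.5}
//     -> %d = call float @llvm.amdgcn.fdiv.fast(float %a, float %b)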

// Optimization is performed based on fpmath, fast math flags, as well as
// denormals, to rewrite fdiv with either rcp or fdiv.fast.
//
// With rcp:
//   1/x -> rcp(x) when rcp is sufficiently accurate or inaccurate rcp is
//   allowed with unsafe-fp-math or afn.
//
//   a/b -> a*rcp(b) when inaccurate rcp is allowed with unsafe-fp-math or afn.
//
// With fdiv.fast:
//   a/b -> fdiv.fast(a, b) when !fpmath >= 2.5ulp with denormals flushed.
//
//   1/x -> fdiv.fast(1,x) when !fpmath >= 2.5ulp.
//
// NOTE: rcp is the preference in cases that both are legal.
bool AMDGPUCodeGenPrepare::visitFDiv(BinaryOperator &FDiv) {
  Type *Ty = FDiv.getType()->getScalarType();

  // The f64 rcp/rsq approximations are pretty inaccurate. We can do an
  // expansion around them in codegen.
  if (Ty->isDoubleTy())
    return false;

  // No intrinsic for fdiv16 if target does not support f16.
  if (Ty->isHalfTy() && !ST->has16BitInsts())
    return false;

  const FPMathOperator *FPOp = cast<const FPMathOperator>(&FDiv);
  const float ReqdAccuracy = FPOp->getFPAccuracy();

  // Inaccurate rcp is allowed with unsafe-fp-math or afn.
  FastMathFlags FMF = FPOp->getFastMathFlags();
  const bool AllowInaccurateRcp = HasUnsafeFPMath || FMF.approxFunc();

  // rcp_f16 is accurate for !fpmath >= 1.0ulp.
  // rcp_f32 is accurate for !fpmath >= 1.0ulp and denormals are flushed.
  // rcp_f64 is never accurate.
  const bool RcpIsAccurate = (Ty->isHalfTy() && ReqdAccuracy >= 1.0f) ||
      (Ty->isFloatTy() && !HasFP32Denormals && ReqdAccuracy >= 1.0f);

  IRBuilder<> Builder(FDiv.getParent(), std::next(FDiv.getIterator()));
  Builder.setFastMathFlags(FMF);
  Builder.SetCurrentDebugLocation(FDiv.getDebugLoc());

  Value *Num = FDiv.getOperand(0);
  Value *Den = FDiv.getOperand(1);

  Value *NewFDiv = nullptr;
  if (auto *VT = dyn_cast<FixedVectorType>(FDiv.getType())) {
    NewFDiv = UndefValue::get(VT);

    // FIXME: Doesn't do the right thing for cases where the vector is
    // partially constant. This works when the scalarizer pass is run first.
    for (unsigned I = 0, E = VT->getNumElements(); I != E; ++I) {
      Value *NumEltI = Builder.CreateExtractElement(Num, I);
      Value *DenEltI = Builder.CreateExtractElement(Den, I);
      // Try rcp first.
      Value *NewElt = optimizeWithRcp(NumEltI, DenEltI, AllowInaccurateRcp,
                                      RcpIsAccurate, Builder, Mod);
      if (!NewElt) // Try fdiv.fast.
        NewElt = optimizeWithFDivFast(NumEltI, DenEltI, ReqdAccuracy,
                                      HasFP32Denormals, Builder, Mod);
      if (!NewElt) // Keep the original.
        NewElt = Builder.CreateFDiv(NumEltI, DenEltI);

      NewFDiv = Builder.CreateInsertElement(NewFDiv, NewElt, I);
    }
  } else { // Scalar FDiv.
    // Try rcp first.
    NewFDiv = optimizeWithRcp(Num, Den, AllowInaccurateRcp, RcpIsAccurate,
                              Builder, Mod);
    if (!NewFDiv) { // Try fdiv.fast.
      NewFDiv = optimizeWithFDivFast(Num, Den, ReqdAccuracy, HasFP32Denormals,
                                     Builder, Mod);
    }
  }

  if (NewFDiv) {
    FDiv.replaceAllUsesWith(NewFDiv);
    NewFDiv->takeName(&FDiv);
    FDiv.eraseFromParent();
  }

  return !!NewFDiv;
}

bool AMDGPUCodeGenPrepare::visitXor(BinaryOperator &I) {
  // Match the Xor instruction, its type and its operands
  IntrinsicInst *IntrinsicCall = dyn_cast<IntrinsicInst>(I.getOperand(0));
  ConstantInt *RHS = dyn_cast<ConstantInt>(I.getOperand(1));
  if (!RHS || !IntrinsicCall || RHS->getSExtValue() != -1)
    return visitBinaryOperator(I);

  // Check that the call is to the amdgcn_class intrinsic and that it has only
  // one use
  if (IntrinsicCall->getIntrinsicID() != Intrinsic::amdgcn_class ||
      !IntrinsicCall->hasOneUse())
    return visitBinaryOperator(I);

  // "Not" the second argument of the intrinsic call
  ConstantInt *Arg = dyn_cast<ConstantInt>(IntrinsicCall->getOperand(1));
  if (!Arg)
    return visitBinaryOperator(I);

  IntrinsicCall->setOperand(
      1, ConstantInt::get(Arg->getType(), Arg->getZExtValue() ^ 0x3ff));
  I.replaceAllUsesWith(IntrinsicCall);
  I.eraseFromParent();
  return true;
}
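
// Illustrative effect of visitXor (mask value invented): a negated class test
//   %cls = call i1 @llvm.amdgcn.class.f32(float %x, i32 3)
//   %not = xor i1 %cls, true
// folds to a single call with the mask inverted over all ten class bits:
//   %not = call i1 @llvm.amdgcn.class.f32(float %x, i32 1020)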

static bool hasUnsafeFPMath(const Function &F) {
  Attribute Attr = F.getFnAttribute("unsafe-fp-math");
  return Attr.getValueAsBool();
}

static std::pair<Value*, Value*> getMul64(IRBuilder<> &Builder,
                                          Value *LHS, Value *RHS) {
  Type *I32Ty = Builder.getInt32Ty();
  Type *I64Ty = Builder.getInt64Ty();

  Value *LHS_EXT64 = Builder.CreateZExt(LHS, I64Ty);
  Value *RHS_EXT64 = Builder.CreateZExt(RHS, I64Ty);
  Value *MUL64 = Builder.CreateMul(LHS_EXT64, RHS_EXT64);
  Value *Lo = Builder.CreateTrunc(MUL64, I32Ty);
  Value *Hi = Builder.CreateLShr(MUL64, Builder.getInt64(32));
  Hi = Builder.CreateTrunc(Hi, I32Ty);
  return std::make_pair(Lo, Hi);
}

static Value* getMulHu(IRBuilder<> &Builder, Value *LHS, Value *RHS) {
  return getMul64(Builder, LHS, RHS).second;
}

/// Figure out how many bits are really needed for this division. \p AtLeast
/// is an optimization hint to bypass the second ComputeNumSignBits call if
/// the first one is insufficient. Returns -1 on failure.
int AMDGPUCodeGenPrepare::getDivNumBits(BinaryOperator &I,
                                        Value *Num, Value *Den,
                                        unsigned AtLeast, bool IsSigned) const {
  const DataLayout &DL = Mod->getDataLayout();
  unsigned LHSSignBits = ComputeNumSignBits(Num, DL, 0, AC, &I);
  if (LHSSignBits < AtLeast)
    return -1;

  unsigned RHSSignBits = ComputeNumSignBits(Den, DL, 0, AC, &I);
  if (RHSSignBits < AtLeast)
    return -1;

  unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
  unsigned DivBits = Num->getType()->getScalarSizeInBits() - SignBits;
  if (IsSigned)
    ++DivBits;
  return DivBits;
}
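
// For example, if the operands of an i32 sdiv were sign-extended from i8,
// each has at least 25 sign bits, so getDivNumBits returns 32 - 25 + 1 = 8
// and the 24-bit expansion below applies.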

// The fractional part of a float is enough to accurately represent up to
// a 24-bit signed integer.
Value *AMDGPUCodeGenPrepare::expandDivRem24(IRBuilder<> &Builder,
                                            BinaryOperator &I,
                                            Value *Num, Value *Den,
                                            bool IsDiv, bool IsSigned) const {
  int DivBits = getDivNumBits(I, Num, Den, 9, IsSigned);
  if (DivBits == -1)
    return nullptr;
  return expandDivRem24Impl(Builder, I, Num, Den, DivBits, IsDiv, IsSigned);
}

Value *AMDGPUCodeGenPrepare::expandDivRem24Impl(IRBuilder<> &Builder,
                                                BinaryOperator &I,
                                                Value *Num, Value *Den,
                                                unsigned DivBits,
                                                bool IsDiv, bool IsSigned) const {
  Type *I32Ty = Builder.getInt32Ty();
  Num = Builder.CreateTrunc(Num, I32Ty);
  Den = Builder.CreateTrunc(Den, I32Ty);

  Type *F32Ty = Builder.getFloatTy();
  ConstantInt *One = Builder.getInt32(1);
  Value *JQ = One;

  if (IsSigned) {
    // char|short jq = ia ^ ib;
    JQ = Builder.CreateXor(Num, Den);

    // jq = jq >> (bitsize - 2)
    JQ = Builder.CreateAShr(JQ, Builder.getInt32(30));

    // jq = jq | 0x1
    JQ = Builder.CreateOr(JQ, One);
  }

  // int ia = (int)LHS;
  Value *IA = Num;

  // int ib = (int)RHS;
  Value *IB = Den;

  // float fa = (float)ia;
  Value *FA = IsSigned ? Builder.CreateSIToFP(IA, F32Ty)
                       : Builder.CreateUIToFP(IA, F32Ty);

  // float fb = (float)ib;
  Value *FB = IsSigned ? Builder.CreateSIToFP(IB, F32Ty)
                       : Builder.CreateUIToFP(IB, F32Ty);

  Function *RcpDecl = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_rcp,
                                                Builder.getFloatTy());
  Value *RCP = Builder.CreateCall(RcpDecl, { FB });
  Value *FQM = Builder.CreateFMul(FA, RCP);

  // fq = trunc(fqm);
  CallInst *FQ = Builder.CreateUnaryIntrinsic(Intrinsic::trunc, FQM);
  FQ->copyFastMathFlags(Builder.getFastMathFlags());

  // float fqneg = -fq;
  Value *FQNeg = Builder.CreateFNeg(FQ);

  // float fr = mad(fqneg, fb, fa);
  auto FMAD = !ST->hasMadMacF32Insts()
                  ? Intrinsic::fma
                  : (Intrinsic::ID)Intrinsic::amdgcn_fmad_ftz;
  Value *FR = Builder.CreateIntrinsic(FMAD,
                                      {FQNeg->getType()}, {FQNeg, FB, FA}, FQ);

  // int iq = (int)fq;
  Value *IQ = IsSigned ? Builder.CreateFPToSI(FQ, I32Ty)
                       : Builder.CreateFPToUI(FQ, I32Ty);

  // fr = fabs(fr);
  FR = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, FR, FQ);

  // fb = fabs(fb);
  FB = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, FB, FQ);

  // int cv = fr >= fb;
  Value *CV = Builder.CreateFCmpOGE(FR, FB);

  // jq = (cv ? jq : 0);
  JQ = Builder.CreateSelect(CV, JQ, Builder.getInt32(0));

  // dst = iq + jq;
  Value *Div = Builder.CreateAdd(IQ, JQ);

  Value *Res = Div;
  if (!IsDiv) {
    // Rem needs compensation; it's easier to recompute it.
    Value *Rem = Builder.CreateMul(Div, Den);
    Res = Builder.CreateSub(Num, Rem);
  }

99367aa18f1SStanislav Mekhanoshin     if (IsSigned) {
99434d9a16eSMatt Arsenault       int InRegBits = 32 - DivBits;
99534d9a16eSMatt Arsenault
99634d9a16eSMatt Arsenault       Res = Builder.CreateShl(Res, InRegBits);
99734d9a16eSMatt Arsenault       Res = Builder.CreateAShr(Res, InRegBits);
99867aa18f1SStanislav Mekhanoshin     } else {
99934d9a16eSMatt Arsenault       ConstantInt *TruncMask
100034d9a16eSMatt Arsenault           = Builder.getInt32((UINT64_C(1) << DivBits) - 1);
100167aa18f1SStanislav Mekhanoshin       Res = Builder.CreateAnd(Res, TruncMask);
100267aa18f1SStanislav Mekhanoshin     }
100334d9a16eSMatt Arsenault   }
100467aa18f1SStanislav Mekhanoshin
100567aa18f1SStanislav Mekhanoshin   return Res;
100667aa18f1SStanislav Mekhanoshin }
100767aa18f1SStanislav Mekhanoshin
1008b30e1223SMatt Arsenault // Try to recognize special cases where the DAG will emit special, better
1009b30e1223SMatt Arsenault // expansions than the general expansion we do here.
1010b30e1223SMatt Arsenault
1011b30e1223SMatt Arsenault // TODO: It would be better to just directly handle those optimizations here.
1012b30e1223SMatt Arsenault bool AMDGPUCodeGenPrepare::divHasSpecialOptimization(
1013b30e1223SMatt Arsenault     BinaryOperator &I, Value *Num, Value *Den) const {
1014b30e1223SMatt Arsenault   if (Constant *C = dyn_cast<Constant>(Den)) {
1015b30e1223SMatt Arsenault     // Arbitrary constants get a better expansion as long as a wider mulhi is
1016b30e1223SMatt Arsenault     // legal.
1017b30e1223SMatt Arsenault     if (C->getType()->getScalarSizeInBits() <= 32)
1018b30e1223SMatt Arsenault       return true;
1019b30e1223SMatt Arsenault
1020b30e1223SMatt Arsenault     // TODO: The SDiv expansion checks for 'not exact' for some reason.
1021b30e1223SMatt Arsenault
1022b30e1223SMatt Arsenault     // If there's no wider mulhi, there's only a better expansion for powers of
1023b30e1223SMatt Arsenault     // two.
1024b30e1223SMatt Arsenault     // TODO: Should really know this for each vector element.
1025b30e1223SMatt Arsenault     if (isKnownToBeAPowerOfTwo(C, *DL, true, 0, AC, &I, DT))
1026b30e1223SMatt Arsenault       return true;
1027b30e1223SMatt Arsenault
1028b30e1223SMatt Arsenault     return false;
1029b30e1223SMatt Arsenault   }
1030b30e1223SMatt Arsenault
1031b30e1223SMatt Arsenault   if (BinaryOperator *BinOpDen = dyn_cast<BinaryOperator>(Den)) {
1032b30e1223SMatt Arsenault     // fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) iff c is power of 2
1033b30e1223SMatt Arsenault     if (BinOpDen->getOpcode() == Instruction::Shl &&
1034b30e1223SMatt Arsenault         isa<Constant>(BinOpDen->getOperand(0)) &&
1035b30e1223SMatt Arsenault         isKnownToBeAPowerOfTwo(BinOpDen->getOperand(0), *DL, true,
1036b30e1223SMatt Arsenault                                0, AC, &I, DT)) {
1037b30e1223SMatt Arsenault       return true;
1038b30e1223SMatt Arsenault     }
1039b30e1223SMatt Arsenault   }
1040b30e1223SMatt Arsenault
1041b30e1223SMatt Arsenault   return false;
1042b30e1223SMatt Arsenault }
1043b30e1223SMatt Arsenault
10445fa87ec0SNikita Popov static Value *getSign32(Value *V, IRBuilder<> &Builder, const DataLayout *DL) {
10455fa87ec0SNikita Popov   // Check whether the sign can be determined statically.
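  // If the known bits already pin the sign, fold directly to -1 or 0, the
  // only two values the 'ashr V, 31' below can produce.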
10465fa87ec0SNikita Popov   KnownBits Known = computeKnownBits(V, *DL);
10475fa87ec0SNikita Popov   if (Known.isNegative())
10485fa87ec0SNikita Popov     return Constant::getAllOnesValue(V->getType());
10495fa87ec0SNikita Popov   if (Known.isNonNegative())
10505fa87ec0SNikita Popov     return Constant::getNullValue(V->getType());
10515fa87ec0SNikita Popov   return Builder.CreateAShr(V, Builder.getInt32(31));
10525fa87ec0SNikita Popov }
10535fa87ec0SNikita Popov
105467aa18f1SStanislav Mekhanoshin Value *AMDGPUCodeGenPrepare::expandDivRem32(IRBuilder<> &Builder,
1055f4bd01c1SJay Foad                                             BinaryOperator &I, Value *X,
1056f4bd01c1SJay Foad                                             Value *Y) const {
10577e7268acSStanislav Mekhanoshin   Instruction::BinaryOps Opc = I.getOpcode();
105867aa18f1SStanislav Mekhanoshin   assert(Opc == Instruction::URem || Opc == Instruction::UDiv ||
105967aa18f1SStanislav Mekhanoshin          Opc == Instruction::SRem || Opc == Instruction::SDiv);
106067aa18f1SStanislav Mekhanoshin
106167aa18f1SStanislav Mekhanoshin   FastMathFlags FMF;
106267aa18f1SStanislav Mekhanoshin   FMF.setFast();
106367aa18f1SStanislav Mekhanoshin   Builder.setFastMathFlags(FMF);
106467aa18f1SStanislav Mekhanoshin
1065f4bd01c1SJay Foad   if (divHasSpecialOptimization(I, X, Y))
1066b30e1223SMatt Arsenault     return nullptr; // Keep it for later optimization.
106767aa18f1SStanislav Mekhanoshin
106867aa18f1SStanislav Mekhanoshin   bool IsDiv = Opc == Instruction::UDiv || Opc == Instruction::SDiv;
106967aa18f1SStanislav Mekhanoshin   bool IsSigned = Opc == Instruction::SRem || Opc == Instruction::SDiv;
107067aa18f1SStanislav Mekhanoshin
1071f4bd01c1SJay Foad   Type *Ty = X->getType();
107267aa18f1SStanislav Mekhanoshin   Type *I32Ty = Builder.getInt32Ty();
107367aa18f1SStanislav Mekhanoshin   Type *F32Ty = Builder.getFloatTy();
107467aa18f1SStanislav Mekhanoshin
107567aa18f1SStanislav Mekhanoshin   if (Ty->getScalarSizeInBits() < 32) {
107667aa18f1SStanislav Mekhanoshin     if (IsSigned) {
1077f4bd01c1SJay Foad       X = Builder.CreateSExt(X, I32Ty);
1078f4bd01c1SJay Foad       Y = Builder.CreateSExt(Y, I32Ty);
107967aa18f1SStanislav Mekhanoshin     } else {
1080f4bd01c1SJay Foad       X = Builder.CreateZExt(X, I32Ty);
1081f4bd01c1SJay Foad       Y = Builder.CreateZExt(Y, I32Ty);
108267aa18f1SStanislav Mekhanoshin     }
108367aa18f1SStanislav Mekhanoshin   }
108467aa18f1SStanislav Mekhanoshin
1085f4bd01c1SJay Foad   if (Value *Res = expandDivRem24(Builder, I, X, Y, IsDiv, IsSigned)) {
108634d9a16eSMatt Arsenault     return IsSigned ? Builder.CreateSExtOrTrunc(Res, Ty) :
108734d9a16eSMatt Arsenault                       Builder.CreateZExtOrTrunc(Res, Ty);
108867aa18f1SStanislav Mekhanoshin   }
108967aa18f1SStanislav Mekhanoshin
109067aa18f1SStanislav Mekhanoshin   ConstantInt *Zero = Builder.getInt32(0);
109167aa18f1SStanislav Mekhanoshin   ConstantInt *One = Builder.getInt32(1);
109267aa18f1SStanislav Mekhanoshin
109367aa18f1SStanislav Mekhanoshin   Value *Sign = nullptr;
109467aa18f1SStanislav Mekhanoshin   if (IsSigned) {
1095f4bd01c1SJay Foad     Value *SignX = getSign32(X, Builder, DL);
1096f4bd01c1SJay Foad     Value *SignY = getSign32(Y, Builder, DL);
109767aa18f1SStanislav Mekhanoshin     // Remainder sign is the same as LHS
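    // The quotient sign is the XOR of the operand signs. Below, abs is
    // computed branchlessly: (V + SignV) ^ SignV equals ~(V - 1) == -V when
    // SignV is -1, and V itself when SignV is 0.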
1098f4bd01c1SJay Foad     Sign = IsDiv ? Builder.CreateXor(SignX, SignY) : SignX;
109967aa18f1SStanislav Mekhanoshin
1100f4bd01c1SJay Foad     X = Builder.CreateAdd(X, SignX);
1101f4bd01c1SJay Foad     Y = Builder.CreateAdd(Y, SignY);
110267aa18f1SStanislav Mekhanoshin
1103f4bd01c1SJay Foad     X = Builder.CreateXor(X, SignX);
1104f4bd01c1SJay Foad     Y = Builder.CreateXor(Y, SignY);
110567aa18f1SStanislav Mekhanoshin   }
110667aa18f1SStanislav Mekhanoshin
1107f4bd01c1SJay Foad   // The algorithm here is based on ideas from "Software Integer Division", Tom
1108f4bd01c1SJay Foad   // Rodeheffer, August 2008.
1109f4bd01c1SJay Foad   //
1110f4bd01c1SJay Foad   // unsigned udiv(unsigned x, unsigned y) {
1111f4bd01c1SJay Foad   //   // Initial estimate of inv(y). The constant is less than 2^32 to ensure
1112f4bd01c1SJay Foad   //   // that this is a lower bound on inv(y), even if some of the calculations
1113f4bd01c1SJay Foad   //   // round up.
1114f4bd01c1SJay Foad   //   unsigned z = (unsigned)((4294967296.0 - 512.0) * v_rcp_f32((float)y));
1115f4bd01c1SJay Foad   //
1116f4bd01c1SJay Foad   //   // One round of UNR (Unsigned integer Newton-Raphson) to improve z.
1117f4bd01c1SJay Foad   //   // Empirically this is guaranteed to give a "two-y" lower bound on
1118f4bd01c1SJay Foad   //   // inv(y).
1119f4bd01c1SJay Foad   //   z += umulh(z, -y * z);
1120f4bd01c1SJay Foad   //
1121f4bd01c1SJay Foad   //   // Quotient/remainder estimate.
1122f4bd01c1SJay Foad   //   unsigned q = umulh(x, z);
1123f4bd01c1SJay Foad   //   unsigned r = x - q * y;
1124f4bd01c1SJay Foad   //
1125f4bd01c1SJay Foad   //   // Two rounds of quotient/remainder refinement.
1126f4bd01c1SJay Foad   //   if (r >= y) {
1127f4bd01c1SJay Foad   //     ++q;
1128f4bd01c1SJay Foad   //     r -= y;
1129f4bd01c1SJay Foad   //   }
1130f4bd01c1SJay Foad   //   if (r >= y) {
1131f4bd01c1SJay Foad   //     ++q;
1132f4bd01c1SJay Foad   //     r -= y;
1133f4bd01c1SJay Foad   //   }
1134f4bd01c1SJay Foad   //
1135f4bd01c1SJay Foad   //   return q;
1136f4bd01c1SJay Foad   // }
113792c62582SMatt Arsenault
1138f4bd01c1SJay Foad   // Initial estimate of inv(y).
1139f4bd01c1SJay Foad   Value *FloatY = Builder.CreateUIToFP(Y, F32Ty);
1140f4bd01c1SJay Foad   Function *Rcp = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_rcp, F32Ty);
1141f4bd01c1SJay Foad   Value *RcpY = Builder.CreateCall(Rcp, {FloatY});
1142f4bd01c1SJay Foad   Constant *Scale = ConstantFP::get(F32Ty, BitsToFloat(0x4F7FFFFE));
1143f4bd01c1SJay Foad   Value *ScaledY = Builder.CreateFMul(RcpY, Scale);
1144f4bd01c1SJay Foad   Value *Z = Builder.CreateFPToUI(ScaledY, I32Ty);
114567aa18f1SStanislav Mekhanoshin
1146f4bd01c1SJay Foad   // One round of UNR.
1147f4bd01c1SJay Foad   Value *NegY = Builder.CreateSub(Zero, Y);
1148f4bd01c1SJay Foad   Value *NegYZ = Builder.CreateMul(NegY, Z);
1149f4bd01c1SJay Foad   Z = Builder.CreateAdd(Z, getMulHu(Builder, Z, NegYZ));
115067aa18f1SStanislav Mekhanoshin
1151f4bd01c1SJay Foad   // Quotient/remainder estimate.
1152f4bd01c1SJay Foad   Value *Q = getMulHu(Builder, X, Z);
1153f4bd01c1SJay Foad   Value *R = Builder.CreateSub(X, Builder.CreateMul(Q, Y));
115467aa18f1SStanislav Mekhanoshin
1155f4bd01c1SJay Foad   // First quotient/remainder refinement.
1156f4bd01c1SJay Foad   Value *Cond = Builder.CreateICmpUGE(R, Y);
1157f4bd01c1SJay Foad   if (IsDiv)
1158f4bd01c1SJay Foad     Q = Builder.CreateSelect(Cond, Builder.CreateAdd(Q, One), Q);
1159f4bd01c1SJay Foad   R = Builder.CreateSelect(Cond, Builder.CreateSub(R, Y), R);
116067aa18f1SStanislav Mekhanoshin
1161f4bd01c1SJay Foad   // Second quotient/remainder refinement.
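  // Two refinement rounds suffice: per the "two-y" bound noted above, the
  // initial quotient estimate can undershoot by at most 2.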
1162f4bd01c1SJay Foad   Cond = Builder.CreateICmpUGE(R, Y);
116367aa18f1SStanislav Mekhanoshin   Value *Res;
1164f4bd01c1SJay Foad   if (IsDiv)
1165f4bd01c1SJay Foad     Res = Builder.CreateSelect(Cond, Builder.CreateAdd(Q, One), Q);
1166f4bd01c1SJay Foad   else
1167f4bd01c1SJay Foad     Res = Builder.CreateSelect(Cond, Builder.CreateSub(R, Y), R);
116867aa18f1SStanislav Mekhanoshin
116967aa18f1SStanislav Mekhanoshin   if (IsSigned) {
117067aa18f1SStanislav Mekhanoshin     Res = Builder.CreateXor(Res, Sign);
117167aa18f1SStanislav Mekhanoshin     Res = Builder.CreateSub(Res, Sign);
117267aa18f1SStanislav Mekhanoshin   }
117367aa18f1SStanislav Mekhanoshin
117467aa18f1SStanislav Mekhanoshin   Res = Builder.CreateTrunc(Res, Ty);
117567aa18f1SStanislav Mekhanoshin
117667aa18f1SStanislav Mekhanoshin   return Res;
117767aa18f1SStanislav Mekhanoshin }
117867aa18f1SStanislav Mekhanoshin
117934d9a16eSMatt Arsenault Value *AMDGPUCodeGenPrepare::shrinkDivRem64(IRBuilder<> &Builder,
118034d9a16eSMatt Arsenault                                             BinaryOperator &I,
118134d9a16eSMatt Arsenault                                             Value *Num, Value *Den) const {
118234d9a16eSMatt Arsenault   if (!ExpandDiv64InIR && divHasSpecialOptimization(I, Num, Den))
118334d9a16eSMatt Arsenault     return nullptr; // Keep it for later optimization.
118434d9a16eSMatt Arsenault
118534d9a16eSMatt Arsenault   Instruction::BinaryOps Opc = I.getOpcode();
118634d9a16eSMatt Arsenault
118734d9a16eSMatt Arsenault   bool IsDiv = Opc == Instruction::SDiv || Opc == Instruction::UDiv;
118834d9a16eSMatt Arsenault   bool IsSigned = Opc == Instruction::SDiv || Opc == Instruction::SRem;
118934d9a16eSMatt Arsenault
119034d9a16eSMatt Arsenault   int NumDivBits = getDivNumBits(I, Num, Den, 32, IsSigned);
119134d9a16eSMatt Arsenault   if (NumDivBits == -1)
119234d9a16eSMatt Arsenault     return nullptr;
119334d9a16eSMatt Arsenault
119434d9a16eSMatt Arsenault   Value *Narrowed = nullptr;
119534d9a16eSMatt Arsenault   if (NumDivBits <= 24) {
119634d9a16eSMatt Arsenault     Narrowed = expandDivRem24Impl(Builder, I, Num, Den, NumDivBits,
119734d9a16eSMatt Arsenault                                   IsDiv, IsSigned);
119834d9a16eSMatt Arsenault   } else if (NumDivBits <= 32) {
119934d9a16eSMatt Arsenault     Narrowed = expandDivRem32(Builder, I, Num, Den);
120034d9a16eSMatt Arsenault   }
120134d9a16eSMatt Arsenault
120234d9a16eSMatt Arsenault   if (Narrowed) {
120334d9a16eSMatt Arsenault     return IsSigned ? Builder.CreateSExt(Narrowed, Num->getType()) :
120434d9a16eSMatt Arsenault                       Builder.CreateZExt(Narrowed, Num->getType());
120534d9a16eSMatt Arsenault   }
120634d9a16eSMatt Arsenault
120734d9a16eSMatt Arsenault   return nullptr;
120834d9a16eSMatt Arsenault }
120934d9a16eSMatt Arsenault
121034d9a16eSMatt Arsenault void AMDGPUCodeGenPrepare::expandDivRem64(BinaryOperator &I) const {
121134d9a16eSMatt Arsenault   Instruction::BinaryOps Opc = I.getOpcode();
121234d9a16eSMatt Arsenault   // Do the general expansion.
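  // expandDivisionUpTo64Bits and expandRemainderUpTo64Bits are the generic
  // IR expansion helpers from Transforms/Utils/IntegerDivision.h; they
  // rewrite I in place and create new basic blocks rather than returning a
  // replacement value.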
121334d9a16eSMatt Arsenault   if (Opc == Instruction::UDiv || Opc == Instruction::SDiv) {
121434d9a16eSMatt Arsenault     expandDivisionUpTo64Bits(&I);
121534d9a16eSMatt Arsenault     return;
121634d9a16eSMatt Arsenault   }
121734d9a16eSMatt Arsenault
121834d9a16eSMatt Arsenault   if (Opc == Instruction::URem || Opc == Instruction::SRem) {
121934d9a16eSMatt Arsenault     expandRemainderUpTo64Bits(&I);
122034d9a16eSMatt Arsenault     return;
122134d9a16eSMatt Arsenault   }
122234d9a16eSMatt Arsenault
122334d9a16eSMatt Arsenault   llvm_unreachable("not a division");
122434d9a16eSMatt Arsenault }
122534d9a16eSMatt Arsenault
122667aa18f1SStanislav Mekhanoshin bool AMDGPUCodeGenPrepare::visitBinaryOperator(BinaryOperator &I) {
1227bcd91778SMatt Arsenault   if (foldBinOpIntoSelect(I))
1228bcd91778SMatt Arsenault     return true;
1229bcd91778SMatt Arsenault
1230f74fc60aSKonstantin Zhuravlyov   if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
123167aa18f1SStanislav Mekhanoshin       DA->isUniform(&I) && promoteUniformOpToI32(I))
123267aa18f1SStanislav Mekhanoshin     return true;
123367aa18f1SStanislav Mekhanoshin
1234b3dd381aSMatt Arsenault   if (UseMul24Intrin && replaceMulWithMul24(I))
123549169a96SMatt Arsenault     return true;
123649169a96SMatt Arsenault
123767aa18f1SStanislav Mekhanoshin   bool Changed = false;
123867aa18f1SStanislav Mekhanoshin   Instruction::BinaryOps Opc = I.getOpcode();
123967aa18f1SStanislav Mekhanoshin   Type *Ty = I.getType();
124067aa18f1SStanislav Mekhanoshin   Value *NewDiv = nullptr;
124134d9a16eSMatt Arsenault   unsigned ScalarSize = Ty->getScalarSizeInBits();
124234d9a16eSMatt Arsenault
124334d9a16eSMatt Arsenault   SmallVector<BinaryOperator *, 8> Div64ToExpand;
124434d9a16eSMatt Arsenault
124567aa18f1SStanislav Mekhanoshin   if ((Opc == Instruction::URem || Opc == Instruction::UDiv ||
124667aa18f1SStanislav Mekhanoshin        Opc == Instruction::SRem || Opc == Instruction::SDiv) &&
12479ec66860SMatt Arsenault       ScalarSize <= 64 &&
12489ec66860SMatt Arsenault       !DisableIDivExpand) {
124967aa18f1SStanislav Mekhanoshin     Value *Num = I.getOperand(0);
125067aa18f1SStanislav Mekhanoshin     Value *Den = I.getOperand(1);
125167aa18f1SStanislav Mekhanoshin     IRBuilder<> Builder(&I);
125267aa18f1SStanislav Mekhanoshin     Builder.SetCurrentDebugLocation(I.getDebugLoc());
125367aa18f1SStanislav Mekhanoshin
12543254a001SChristopher Tetreault     if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
125567aa18f1SStanislav Mekhanoshin       NewDiv = UndefValue::get(VT);
125667aa18f1SStanislav Mekhanoshin
12577e7268acSStanislav Mekhanoshin       for (unsigned N = 0, E = VT->getNumElements(); N != E; ++N) {
12587e7268acSStanislav Mekhanoshin         Value *NumEltN = Builder.CreateExtractElement(Num, N);
12597e7268acSStanislav Mekhanoshin         Value *DenEltN = Builder.CreateExtractElement(Den, N);
126034d9a16eSMatt Arsenault
126134d9a16eSMatt Arsenault         Value *NewElt;
126234d9a16eSMatt Arsenault         if (ScalarSize <= 32) {
126334d9a16eSMatt Arsenault           NewElt = expandDivRem32(Builder, I, NumEltN, DenEltN);
126467aa18f1SStanislav Mekhanoshin           if (!NewElt)
12657e7268acSStanislav Mekhanoshin             NewElt = Builder.CreateBinOp(Opc, NumEltN, DenEltN);
126634d9a16eSMatt Arsenault         } else {
126734d9a16eSMatt Arsenault           // See if this 64-bit division can be shrunk to 32/24-bits before
126834d9a16eSMatt Arsenault           // producing the general expansion.
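          // shrinkDivRem64 also returns null when the DAG has a better
          // special-case expansion; the plain binop created below is then
          // left in place for it.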
126934d9a16eSMatt Arsenault           NewElt = shrinkDivRem64(Builder, I, NumEltN, DenEltN);
127034d9a16eSMatt Arsenault           if (!NewElt) {
127134d9a16eSMatt Arsenault             // The general 64-bit expansion introduces control flow and doesn't
127234d9a16eSMatt Arsenault             // return the new value. Just insert a scalar copy and defer
127334d9a16eSMatt Arsenault             // expanding it.
127434d9a16eSMatt Arsenault             NewElt = Builder.CreateBinOp(Opc, NumEltN, DenEltN);
127534d9a16eSMatt Arsenault             Div64ToExpand.push_back(cast<BinaryOperator>(NewElt));
127634d9a16eSMatt Arsenault           }
127734d9a16eSMatt Arsenault         }
127834d9a16eSMatt Arsenault
12797e7268acSStanislav Mekhanoshin         NewDiv = Builder.CreateInsertElement(NewDiv, NewElt, N);
128067aa18f1SStanislav Mekhanoshin       }
128167aa18f1SStanislav Mekhanoshin     } else {
128234d9a16eSMatt Arsenault       if (ScalarSize <= 32)
12837e7268acSStanislav Mekhanoshin         NewDiv = expandDivRem32(Builder, I, Num, Den);
128434d9a16eSMatt Arsenault       else {
128534d9a16eSMatt Arsenault         NewDiv = shrinkDivRem64(Builder, I, Num, Den);
128634d9a16eSMatt Arsenault         if (!NewDiv)
128734d9a16eSMatt Arsenault           Div64ToExpand.push_back(&I);
128834d9a16eSMatt Arsenault       }
128967aa18f1SStanislav Mekhanoshin     }
129067aa18f1SStanislav Mekhanoshin
129167aa18f1SStanislav Mekhanoshin     if (NewDiv) {
129267aa18f1SStanislav Mekhanoshin       I.replaceAllUsesWith(NewDiv);
129367aa18f1SStanislav Mekhanoshin       I.eraseFromParent();
129467aa18f1SStanislav Mekhanoshin       Changed = true;
129567aa18f1SStanislav Mekhanoshin     }
129667aa18f1SStanislav Mekhanoshin   }
1297e14df4b2SKonstantin Zhuravlyov
129834d9a16eSMatt Arsenault   if (ExpandDiv64InIR) {
129934d9a16eSMatt Arsenault     // TODO: We get much worse code in specially handled constant cases.
130034d9a16eSMatt Arsenault     for (BinaryOperator *Div : Div64ToExpand) {
130134d9a16eSMatt Arsenault       expandDivRem64(*Div);
130234d9a16eSMatt Arsenault       Changed = true;
130334d9a16eSMatt Arsenault     }
130434d9a16eSMatt Arsenault   }
130534d9a16eSMatt Arsenault
1306e14df4b2SKonstantin Zhuravlyov   return Changed;
1307e14df4b2SKonstantin Zhuravlyov }
1308e14df4b2SKonstantin Zhuravlyov
1309a126a13bSWei Ding bool AMDGPUCodeGenPrepare::visitLoadInst(LoadInst &I) {
131090083d30SMatt Arsenault   if (!WidenLoads)
131190083d30SMatt Arsenault     return false;
131290083d30SMatt Arsenault
13130da6350dSMatt Arsenault   if ((I.getPointerAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
13140da6350dSMatt Arsenault        I.getPointerAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
1315a126a13bSWei Ding       canWidenScalarExtLoad(I)) {
1316a126a13bSWei Ding     IRBuilder<> Builder(&I);
1317a126a13bSWei Ding     Builder.SetCurrentDebugLocation(I.getDebugLoc());
1318a126a13bSWei Ding
1319a126a13bSWei Ding     Type *I32Ty = Builder.getInt32Ty();
1320a126a13bSWei Ding     Type *PT = PointerType::get(I32Ty, I.getPointerAddressSpace());
1321a126a13bSWei Ding     Value *BitCast = Builder.CreateBitCast(I.getPointerOperand(), PT);
132214359ef1SJames Y Knight     LoadInst *WidenLoad = Builder.CreateLoad(I32Ty, BitCast);
132357e541e8SMatt Arsenault     WidenLoad->copyMetadata(I);
132457e541e8SMatt Arsenault
132557e541e8SMatt Arsenault     // If we have range metadata, we need to convert the type, and not make
132657e541e8SMatt Arsenault     // assumptions about the high bits.
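    // !range metadata holds a [Lo, Hi) pair. Keeping the original lower bound
    // and wrapping the upper bound to 0 drops any claim about the widened
    // high bits; a lower bound of zero carries no information at all, so the
    // metadata is simply removed in that case.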
132757e541e8SMatt Arsenault     if (auto *Range = WidenLoad->getMetadata(LLVMContext::MD_range)) {
132857e541e8SMatt Arsenault       ConstantInt *Lower =
132957e541e8SMatt Arsenault           mdconst::extract<ConstantInt>(Range->getOperand(0));
133057e541e8SMatt Arsenault
1331477b9bc9SJay Foad       if (Lower->isNullValue()) {
133257e541e8SMatt Arsenault         WidenLoad->setMetadata(LLVMContext::MD_range, nullptr);
133357e541e8SMatt Arsenault       } else {
133457e541e8SMatt Arsenault         Metadata *LowAndHigh[] = {
133557e541e8SMatt Arsenault             ConstantAsMetadata::get(ConstantInt::get(I32Ty, Lower->getValue().zext(32))),
133657e541e8SMatt Arsenault             // Don't make assumptions about the high bits.
133757e541e8SMatt Arsenault             ConstantAsMetadata::get(ConstantInt::get(I32Ty, 0))
133857e541e8SMatt Arsenault         };
133957e541e8SMatt Arsenault
134057e541e8SMatt Arsenault         WidenLoad->setMetadata(LLVMContext::MD_range,
134157e541e8SMatt Arsenault                                MDNode::get(Mod->getContext(), LowAndHigh));
134257e541e8SMatt Arsenault       }
134357e541e8SMatt Arsenault     }
1344a126a13bSWei Ding
1345a126a13bSWei Ding     int TySize = Mod->getDataLayout().getTypeSizeInBits(I.getType());
1346a126a13bSWei Ding     Type *IntNTy = Builder.getIntNTy(TySize);
1347a126a13bSWei Ding     Value *ValTrunc = Builder.CreateTrunc(WidenLoad, IntNTy);
1348a126a13bSWei Ding     Value *ValOrig = Builder.CreateBitCast(ValTrunc, I.getType());
1349a126a13bSWei Ding     I.replaceAllUsesWith(ValOrig);
1350a126a13bSWei Ding     I.eraseFromParent();
1351a126a13bSWei Ding     return true;
1352a126a13bSWei Ding   }
1353a126a13bSWei Ding
1354a126a13bSWei Ding   return false;
1355a126a13bSWei Ding }
1356a126a13bSWei Ding
1357e14df4b2SKonstantin Zhuravlyov bool AMDGPUCodeGenPrepare::visitICmpInst(ICmpInst &I) {
1358e14df4b2SKonstantin Zhuravlyov   bool Changed = false;
1359e14df4b2SKonstantin Zhuravlyov
1360f74fc60aSKonstantin Zhuravlyov   if (ST->has16BitInsts() && needsPromotionToI32(I.getOperand(0)->getType()) &&
1361f74fc60aSKonstantin Zhuravlyov       DA->isUniform(&I))
1362f74fc60aSKonstantin Zhuravlyov     Changed |= promoteUniformOpToI32(I);
1363e14df4b2SKonstantin Zhuravlyov
1364e14df4b2SKonstantin Zhuravlyov   return Changed;
1365e14df4b2SKonstantin Zhuravlyov }
1366e14df4b2SKonstantin Zhuravlyov
1367e14df4b2SKonstantin Zhuravlyov bool AMDGPUCodeGenPrepare::visitSelectInst(SelectInst &I) {
1368e14df4b2SKonstantin Zhuravlyov   bool Changed = false;
1369e14df4b2SKonstantin Zhuravlyov
1370f74fc60aSKonstantin Zhuravlyov   if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
1371f74fc60aSKonstantin Zhuravlyov       DA->isUniform(&I))
1372f74fc60aSKonstantin Zhuravlyov     Changed |= promoteUniformOpToI32(I);
1373b4eb5d50SKonstantin Zhuravlyov
1374b4eb5d50SKonstantin Zhuravlyov   return Changed;
1375b4eb5d50SKonstantin Zhuravlyov }
1376b4eb5d50SKonstantin Zhuravlyov
1377b4eb5d50SKonstantin Zhuravlyov bool AMDGPUCodeGenPrepare::visitIntrinsicInst(IntrinsicInst &I) {
1378b4eb5d50SKonstantin Zhuravlyov   switch (I.getIntrinsicID()) {
1379b4eb5d50SKonstantin Zhuravlyov   case Intrinsic::bitreverse:
1380b4eb5d50SKonstantin Zhuravlyov     return visitBitreverseIntrinsicInst(I);
1381b4eb5d50SKonstantin Zhuravlyov   default:
1382b4eb5d50SKonstantin Zhuravlyov     return false;
1383b4eb5d50SKonstantin Zhuravlyov   }
1384b4eb5d50SKonstantin Zhuravlyov }
1385b4eb5d50SKonstantin Zhuravlyov
1386b4eb5d50SKonstantin Zhuravlyov bool AMDGPUCodeGenPrepare::visitBitreverseIntrinsicInst(IntrinsicInst &I) {
1387b4eb5d50SKonstantin Zhuravlyov   bool Changed = false;
1388b4eb5d50SKonstantin Zhuravlyov
1389f74fc60aSKonstantin Zhuravlyov   if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
1390f74fc60aSKonstantin Zhuravlyov       DA->isUniform(&I))
1391f74fc60aSKonstantin Zhuravlyov     Changed |= promoteUniformBitreverseToI32(I);
1392e14df4b2SKonstantin Zhuravlyov
1393e14df4b2SKonstantin Zhuravlyov   return Changed;
1394e14df4b2SKonstantin Zhuravlyov }
1395e14df4b2SKonstantin Zhuravlyov
139686de486dSMatt Arsenault bool AMDGPUCodeGenPrepare::doInitialization(Module &M) {
1397a1fe17c9SMatt Arsenault   Mod = &M;
139849169a96SMatt Arsenault   DL = &Mod->getDataLayout();
139986de486dSMatt Arsenault   return false;
140086de486dSMatt Arsenault }
140186de486dSMatt Arsenault
140286de486dSMatt Arsenault bool AMDGPUCodeGenPrepare::runOnFunction(Function &F) {
14038b61764cSFrancis Visoiu Mistrih   if (skipFunction(F))
140486de486dSMatt Arsenault     return false;
140586de486dSMatt Arsenault
14068b61764cSFrancis Visoiu Mistrih   auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
14078b61764cSFrancis Visoiu Mistrih   if (!TPC)
14088b61764cSFrancis Visoiu Mistrih     return false;
14098b61764cSFrancis Visoiu Mistrih
141012269ddaSMatt Arsenault   const AMDGPUTargetMachine &TM = TPC->getTM<AMDGPUTargetMachine>();
14115bfbae5cSTom Stellard   ST = &TM.getSubtarget<GCNSubtarget>(F);
14127e7268acSStanislav Mekhanoshin   AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
141335617ed4SNicolai Haehnle   DA = &getAnalysis<LegacyDivergenceAnalysis>();
1414b30e1223SMatt Arsenault
1415b30e1223SMatt Arsenault   auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
1416b30e1223SMatt Arsenault   DT = DTWP ? &DTWP->getDomTree() : nullptr;
1417b30e1223SMatt Arsenault
1418a1fe17c9SMatt Arsenault   HasUnsafeFPMath = hasUnsafeFPMath(F);
14195660bb6bSMatt Arsenault
14205660bb6bSMatt Arsenault   AMDGPU::SIModeRegisterDefaults Mode(F);
14215660bb6bSMatt Arsenault   HasFP32Denormals = Mode.allFP32Denormals();
142286de486dSMatt Arsenault
1423a1fe17c9SMatt Arsenault   bool MadeChange = false;
1424a1fe17c9SMatt Arsenault
142534d9a16eSMatt Arsenault   Function::iterator NextBB;
142634d9a16eSMatt Arsenault   for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; FI = NextBB) {
142734d9a16eSMatt Arsenault     BasicBlock *BB = &*FI;
142834d9a16eSMatt Arsenault     NextBB = std::next(FI);
142934d9a16eSMatt Arsenault
1430a1fe17c9SMatt Arsenault     BasicBlock::iterator Next;
143134d9a16eSMatt Arsenault     for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; I = Next) {
1432a1fe17c9SMatt Arsenault       Next = std::next(I);
143334d9a16eSMatt Arsenault
1434a1fe17c9SMatt Arsenault       MadeChange |= visit(*I);
143534d9a16eSMatt Arsenault
143634d9a16eSMatt Arsenault       if (Next != E) { // Control flow changed
143734d9a16eSMatt Arsenault         BasicBlock *NextInstBB = Next->getParent();
143834d9a16eSMatt Arsenault         if (NextInstBB != BB) {
143934d9a16eSMatt Arsenault           BB = NextInstBB;
144034d9a16eSMatt Arsenault           E = BB->end();
144134d9a16eSMatt Arsenault           FE = F.end();
144234d9a16eSMatt Arsenault         }
144334d9a16eSMatt Arsenault       }
1444a1fe17c9SMatt Arsenault     }
1445a1fe17c9SMatt Arsenault   }
1446a1fe17c9SMatt Arsenault
1447a1fe17c9SMatt Arsenault   return MadeChange;
144886de486dSMatt Arsenault }
144986de486dSMatt Arsenault
14508b61764cSFrancis Visoiu Mistrih INITIALIZE_PASS_BEGIN(AMDGPUCodeGenPrepare, DEBUG_TYPE,
145186de486dSMatt Arsenault                       "AMDGPU IR optimizations", false, false)
14527e7268acSStanislav Mekhanoshin INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
145335617ed4SNicolai Haehnle INITIALIZE_PASS_DEPENDENCY(LegacyDivergenceAnalysis)
"AMDGPU IR optimizations", 14558b61764cSFrancis Visoiu Mistrih false, false) 145686de486dSMatt Arsenault 145786de486dSMatt Arsenault char AMDGPUCodeGenPrepare::ID = 0; 145886de486dSMatt Arsenault 14598b61764cSFrancis Visoiu Mistrih FunctionPass *llvm::createAMDGPUCodeGenPreparePass() { 14608b61764cSFrancis Visoiu Mistrih return new AMDGPUCodeGenPrepare(); 146186de486dSMatt Arsenault } 1462