//===-- AMDGPUCodeGenPrepare.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass does misc. AMDGPU optimizations on IR before instruction
/// selection.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Transforms/Utils/IntegerDivision.h"
#include <cassert>
#include <iterator>

#define DEBUG_TYPE "amdgpu-codegenprepare"

using namespace llvm;

namespace {

static cl::opt<bool> WidenLoads(
  "amdgpu-codegenprepare-widen-constant-loads",
  cl::desc("Widen sub-dword constant address space loads in AMDGPUCodeGenPrepare"),
  cl::ReallyHidden,
  cl::init(true));
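
// The cl::opt flags in this file are intended for debugging and can be toggled
// on the llc command line (illustrative invocation):
//   llc -mtriple=amdgcn ... -amdgpu-codegenprepare-widen-constant-loads=0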

static cl::opt<bool> UseMul24Intrin(
  "amdgpu-codegenprepare-mul24",
  cl::desc("Introduce mul24 intrinsics in AMDGPUCodeGenPrepare"),
  cl::ReallyHidden,
  cl::init(true));

// Legalize 64-bit division by using the generic IR expansion.
static cl::opt<bool> ExpandDiv64InIR(
  "amdgpu-codegenprepare-expand-div64",
  cl::desc("Expand 64-bit division in AMDGPUCodeGenPrepare"),
  cl::ReallyHidden,
  cl::init(false));

// Leave all division operations as they are. This supersedes ExpandDiv64InIR
// and is used for testing the legalizer.
static cl::opt<bool> DisableIDivExpand(
  "amdgpu-codegenprepare-disable-idiv-expansion",
  cl::desc("Prevent expanding integer division in AMDGPUCodeGenPrepare"),
  cl::ReallyHidden,
  cl::init(false));

class AMDGPUCodeGenPrepare : public FunctionPass,
                             public InstVisitor<AMDGPUCodeGenPrepare, bool> {
  const GCNSubtarget *ST = nullptr;
  AssumptionCache *AC = nullptr;
  DominatorTree *DT = nullptr;
  LegacyDivergenceAnalysis *DA = nullptr;
  Module *Mod = nullptr;
  const DataLayout *DL = nullptr;
  bool HasUnsafeFPMath = false;
  bool HasFP32Denormals = false;

  /// \returns \p T's base element bit width.
  unsigned getBaseElementBitWidth(const Type *T) const;

  /// \returns Equivalent 32 bit integer type for given type \p T. For example,
  /// if \p T is i7, then i32 is returned; if \p T is <3 x i12>, then <3 x i32>
  /// is returned.
  Type *getI32Ty(IRBuilder<> &B, const Type *T) const;

  /// \returns True if binary operation \p I is a signed binary operation,
  /// false otherwise.
  bool isSigned(const BinaryOperator &I) const;

  /// \returns True if the condition of 'select' operation \p I comes from a
  /// signed 'icmp' operation, false otherwise.
  bool isSigned(const SelectInst &I) const;

  /// \returns True if type \p T needs to be promoted to 32 bit integer type,
  /// false otherwise.
  bool needsPromotionToI32(const Type *T) const;

  /// Promotes uniform binary operation \p I to equivalent 32 bit binary
  /// operation.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by sign or zero extending operands
  /// to 32 bits, replacing \p I with an equivalent 32 bit binary operation, and
  /// truncating the result of the 32 bit binary operation back to \p I's
  /// original type. Division operations are not promoted.
  ///
  /// \returns True if \p I is promoted to an equivalent 32 bit binary
  /// operation, false otherwise.
  bool promoteUniformOpToI32(BinaryOperator &I) const;

  /// Promotes uniform 'icmp' operation \p I to 32 bit 'icmp' operation.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by sign or zero extending operands
  /// to 32 bits, and replacing \p I with a 32 bit 'icmp' operation.
  ///
  /// \returns True.
  bool promoteUniformOpToI32(ICmpInst &I) const;

  /// Promotes uniform 'select' operation \p I to 32 bit 'select'
  /// operation.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by sign or zero extending operands
  /// to 32 bits, replacing \p I with a 32 bit 'select' operation, and
  /// truncating the result of the 32 bit 'select' operation back to \p I's
  /// original type.
  ///
  /// \returns True.
  bool promoteUniformOpToI32(SelectInst &I) const;
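
  // A minimal sketch of the promoted form (value names are hypothetical): a
  // uniform 'add i16 %a, %b' becomes
  //   %ext0 = zext i16 %a to i32
  //   %ext1 = zext i16 %b to i32
  //   %add  = add nuw nsw i32 %ext0, %ext1
  //   %res  = trunc i32 %add to i16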

  /// Promotes uniform 'bitreverse' intrinsic \p I to 32 bit 'bitreverse'
  /// intrinsic.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by zero extending the operand to 32
  /// bits, replacing \p I with the 32 bit 'bitreverse' intrinsic, shifting the
  /// result of the 32 bit 'bitreverse' intrinsic to the right with zero fill
  /// (the shift amount is 32 minus \p I's base element bit width), and
  /// truncating the result of the shift operation back to \p I's original type.
  ///
  /// \returns True.
  bool promoteUniformBitreverseToI32(IntrinsicInst &I) const;

  unsigned numBitsUnsigned(Value *Op, unsigned ScalarSize) const;
  unsigned numBitsSigned(Value *Op, unsigned ScalarSize) const;
  bool isI24(Value *V, unsigned ScalarSize) const;
  bool isU24(Value *V, unsigned ScalarSize) const;

  /// Replace mul instructions with llvm.amdgcn.mul.u24 or llvm.amdgcn.mul.s24.
  /// SelectionDAG has an issue where an 'and' asserting that the operand bits
  /// are known can be optimized away before the mul24 combine sees it, so do
  /// the replacement here in IR.
  bool replaceMulWithMul24(BinaryOperator &I) const;

  /// Perform the same function as the equivalently named function in
  /// DAGCombiner. Since we expand some divisions here, we need to perform this
  /// before the expansion obscures the select.
  bool foldBinOpIntoSelect(BinaryOperator &I) const;

  bool divHasSpecialOptimization(BinaryOperator &I,
                                 Value *Num, Value *Den) const;
  int getDivNumBits(BinaryOperator &I,
                    Value *Num, Value *Den,
                    unsigned AtLeast, bool Signed) const;

  /// Expands 24 bit div or rem.
  Value *expandDivRem24(IRBuilder<> &Builder, BinaryOperator &I,
                        Value *Num, Value *Den,
                        bool IsDiv, bool IsSigned) const;

  Value *expandDivRem24Impl(IRBuilder<> &Builder, BinaryOperator &I,
                            Value *Num, Value *Den, unsigned NumBits,
                            bool IsDiv, bool IsSigned) const;

  /// Expands 32 bit div or rem.
  Value *expandDivRem32(IRBuilder<> &Builder, BinaryOperator &I,
                        Value *Num, Value *Den) const;

  Value *shrinkDivRem64(IRBuilder<> &Builder, BinaryOperator &I,
                        Value *Num, Value *Den) const;
  void expandDivRem64(BinaryOperator &I) const;

  /// Widen a scalar load.
  ///
  /// \details Widen a uniform, small-type load from constant memory to a full
  /// 32 bits, then truncate the result, to allow a scalar load instead of a
  /// vector load.
  ///
  /// \returns True.
  bool canWidenScalarExtLoad(LoadInst &I) const;
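
  // A minimal sketch of the widened form (pointer and value names are
  // hypothetical): a sub-dword load from constant memory such as
  //   %v = load i8, i8 addrspace(4)* %p, align 4
  // becomes a full 32-bit scalar load whose result is truncated back:
  //   %w = load i32, i32 addrspace(4)* %q, align 4
  //   %v = trunc i32 %w to i8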

public:
  static char ID;

  AMDGPUCodeGenPrepare() : FunctionPass(ID) {}

  bool visitFDiv(BinaryOperator &I);

  bool visitInstruction(Instruction &I) { return false; }
  bool visitBinaryOperator(BinaryOperator &I);
  bool visitLoadInst(LoadInst &I);
  bool visitICmpInst(ICmpInst &I);
  bool visitSelectInst(SelectInst &I);

  bool visitIntrinsicInst(IntrinsicInst &I);
  bool visitBitreverseIntrinsicInst(IntrinsicInst &I);

  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "AMDGPU IR optimizations"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<LegacyDivergenceAnalysis>();

    // FIXME: Division expansion needs to preserve the dominator tree.
    if (!ExpandDiv64InIR)
      AU.setPreservesAll();
  }
};

} // end anonymous namespace

unsigned AMDGPUCodeGenPrepare::getBaseElementBitWidth(const Type *T) const {
  assert(needsPromotionToI32(T) && "T does not need promotion to i32");

  if (T->isIntegerTy())
    return T->getIntegerBitWidth();
  return cast<VectorType>(T)->getElementType()->getIntegerBitWidth();
}

Type *AMDGPUCodeGenPrepare::getI32Ty(IRBuilder<> &B, const Type *T) const {
  assert(needsPromotionToI32(T) && "T does not need promotion to i32");

  if (T->isIntegerTy())
    return B.getInt32Ty();
  return VectorType::get(B.getInt32Ty(), cast<VectorType>(T)->getNumElements());
}

bool AMDGPUCodeGenPrepare::isSigned(const BinaryOperator &I) const {
  return I.getOpcode() == Instruction::AShr ||
         I.getOpcode() == Instruction::SDiv || I.getOpcode() == Instruction::SRem;
}

bool AMDGPUCodeGenPrepare::isSigned(const SelectInst &I) const {
  return isa<ICmpInst>(I.getOperand(0)) ?
         cast<ICmpInst>(I.getOperand(0))->isSigned() : false;
}

bool AMDGPUCodeGenPrepare::needsPromotionToI32(const Type *T) const {
  const IntegerType *IntTy = dyn_cast<IntegerType>(T);
  if (IntTy && IntTy->getBitWidth() > 1 && IntTy->getBitWidth() <= 16)
    return true;

  if (const VectorType *VT = dyn_cast<VectorType>(T)) {
    // TODO: The set of packed operations is more limited, so may want to
    // promote some anyway.
    if (ST->hasVOP3PInsts())
      return false;

    return needsPromotionToI32(VT->getElementType());
  }

  return false;
}

// Return true if the op promoted to i32 should have nsw set.
static bool promotedOpIsNSW(const Instruction &I) {
  switch (I.getOpcode()) {
  case Instruction::Shl:
  case Instruction::Add:
  case Instruction::Sub:
    return true;
  case Instruction::Mul:
    return I.hasNoUnsignedWrap();
  default:
    return false;
  }
}

// Return true if the op promoted to i32 should have nuw set.
static bool promotedOpIsNUW(const Instruction &I) {
  switch (I.getOpcode()) {
  case Instruction::Shl:
  case Instruction::Add:
  case Instruction::Mul:
    return true;
  case Instruction::Sub:
    return I.hasNoUnsignedWrap();
  default:
    return false;
  }
}

bool AMDGPUCodeGenPrepare::canWidenScalarExtLoad(LoadInst &I) const {
  Type *Ty = I.getType();
  const DataLayout &DL = Mod->getDataLayout();
  int TySize = DL.getTypeSizeInBits(Ty);
  unsigned Align = I.getAlignment() ?
                   I.getAlignment() : DL.getABITypeAlignment(Ty);

  return I.isSimple() && TySize < 32 && Align >= 4 && DA->isUniform(&I);
}

bool AMDGPUCodeGenPrepare::promoteUniformOpToI32(BinaryOperator &I) const {
  assert(needsPromotionToI32(I.getType()) &&
         "I does not need promotion to i32");

  if (I.getOpcode() == Instruction::SDiv ||
      I.getOpcode() == Instruction::UDiv ||
      I.getOpcode() == Instruction::SRem ||
      I.getOpcode() == Instruction::URem)
    return false;

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getType());
  Value *ExtOp0 = nullptr;
  Value *ExtOp1 = nullptr;
  Value *ExtRes = nullptr;
  Value *TruncRes = nullptr;

  if (isSigned(I)) {
    ExtOp0 = Builder.CreateSExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
  } else {
    ExtOp0 = Builder.CreateZExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
  }

  ExtRes = Builder.CreateBinOp(I.getOpcode(), ExtOp0, ExtOp1);
  if (Instruction *Inst = dyn_cast<Instruction>(ExtRes)) {
    if (promotedOpIsNSW(cast<Instruction>(I)))
      Inst->setHasNoSignedWrap();

    if (promotedOpIsNUW(cast<Instruction>(I)))
      Inst->setHasNoUnsignedWrap();

    if (const auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I))
      Inst->setIsExact(ExactOp->isExact());
  }

  TruncRes = Builder.CreateTrunc(ExtRes, I.getType());

  I.replaceAllUsesWith(TruncRes);
  I.eraseFromParent();

  return true;
}

bool AMDGPUCodeGenPrepare::promoteUniformOpToI32(ICmpInst &I) const {
  assert(needsPromotionToI32(I.getOperand(0)->getType()) &&
         "I does not need promotion to i32");

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getOperand(0)->getType());
  Value *ExtOp0 = nullptr;
  Value *ExtOp1 = nullptr;
  Value *NewICmp = nullptr;

  if (I.isSigned()) {
    ExtOp0 = Builder.CreateSExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
  } else {
    ExtOp0 = Builder.CreateZExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
  }
  NewICmp = Builder.CreateICmp(I.getPredicate(), ExtOp0, ExtOp1);

  I.replaceAllUsesWith(NewICmp);
  I.eraseFromParent();

  return true;
}

bool AMDGPUCodeGenPrepare::promoteUniformOpToI32(SelectInst &I) const {
  assert(needsPromotionToI32(I.getType()) &&
         "I does not need promotion to i32");

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getType());
  Value *ExtOp1 = nullptr;
  Value *ExtOp2 = nullptr;
  Value *ExtRes = nullptr;
  Value *TruncRes = nullptr;

  if (isSigned(I)) {
    ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
    ExtOp2 = Builder.CreateSExt(I.getOperand(2), I32Ty);
  } else {
    ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
    ExtOp2 = Builder.CreateZExt(I.getOperand(2), I32Ty);
  }
  ExtRes = Builder.CreateSelect(I.getOperand(0), ExtOp1, ExtOp2);
  TruncRes = Builder.CreateTrunc(ExtRes, I.getType());

  I.replaceAllUsesWith(TruncRes);
  I.eraseFromParent();

  return true;
}

bool AMDGPUCodeGenPrepare::promoteUniformBitreverseToI32(
    IntrinsicInst &I) const {
  assert(I.getIntrinsicID() == Intrinsic::bitreverse &&
         "I must be bitreverse intrinsic");
  assert(needsPromotionToI32(I.getType()) &&
         "I does not need promotion to i32");

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());
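
  // The emitted sequence, e.g. for a uniform i16 bitreverse (value names are
  // hypothetical):
  //   %ext = zext i16 %x to i32
  //   %rev = call i32 @llvm.bitreverse.i32(i32 %ext)
  //   %shr = lshr i32 %rev, 16
  //   %res = trunc i32 %shr to i16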

  Type *I32Ty = getI32Ty(Builder, I.getType());
  Function *I32 =
      Intrinsic::getDeclaration(Mod, Intrinsic::bitreverse, { I32Ty });
  Value *ExtOp = Builder.CreateZExt(I.getOperand(0), I32Ty);
  Value *ExtRes = Builder.CreateCall(I32, { ExtOp });
  Value *LShrOp =
      Builder.CreateLShr(ExtRes, 32 - getBaseElementBitWidth(I.getType()));
  Value *TruncRes =
      Builder.CreateTrunc(LShrOp, I.getType());

  I.replaceAllUsesWith(TruncRes);
  I.eraseFromParent();

  return true;
}

unsigned AMDGPUCodeGenPrepare::numBitsUnsigned(Value *Op,
                                               unsigned ScalarSize) const {
  KnownBits Known = computeKnownBits(Op, *DL, 0, AC);
  return ScalarSize - Known.countMinLeadingZeros();
}

unsigned AMDGPUCodeGenPrepare::numBitsSigned(Value *Op,
                                             unsigned ScalarSize) const {
  // In order for this to be a signed 24-bit value, bit 23 must be a sign bit.
  return ScalarSize - ComputeNumSignBits(Op, *DL, 0, AC);
}
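
// For example, an i32 value produced by 'sext i8' has at least 25 sign bits,
// so numBitsSigned reports at most 7 and isI24 below holds.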

bool AMDGPUCodeGenPrepare::isI24(Value *V, unsigned ScalarSize) const {
  return ScalarSize >= 24 && // Types less than 24-bit should be treated
                             // as unsigned 24-bit values.
         numBitsSigned(V, ScalarSize) < 24;
}

bool AMDGPUCodeGenPrepare::isU24(Value *V, unsigned ScalarSize) const {
  return numBitsUnsigned(V, ScalarSize) <= 24;
}

static void extractValues(IRBuilder<> &Builder,
                          SmallVectorImpl<Value *> &Values, Value *V) {
  VectorType *VT = dyn_cast<VectorType>(V->getType());
  if (!VT) {
    Values.push_back(V);
    return;
  }

  for (int I = 0, E = VT->getNumElements(); I != E; ++I)
    Values.push_back(Builder.CreateExtractElement(V, I));
}

static Value *insertValues(IRBuilder<> &Builder,
                           Type *Ty,
                           SmallVectorImpl<Value *> &Values) {
  if (Values.size() == 1)
    return Values[0];

  Value *NewVal = UndefValue::get(Ty);
  for (int I = 0, E = Values.size(); I != E; ++I)
    NewVal = Builder.CreateInsertElement(NewVal, Values[I], I);

  return NewVal;
}

bool AMDGPUCodeGenPrepare::replaceMulWithMul24(BinaryOperator &I) const {
  if (I.getOpcode() != Instruction::Mul)
    return false;

  Type *Ty = I.getType();
  unsigned Size = Ty->getScalarSizeInBits();
  if (Size <= 16 && ST->has16BitInsts())
    return false;

  // Prefer scalar if this could be s_mul_i32
  if (DA->isUniform(&I))
    return false;

  Value *LHS = I.getOperand(0);
  Value *RHS = I.getOperand(1);
  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Intrinsic::ID IntrID = Intrinsic::not_intrinsic;

  // TODO: Should this try to match mulhi24?
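  // A sketch of the rewrite when both operands provably fit in 24 unsigned
  // bits (value names are hypothetical):
  //   %r = mul i32 %a, %b
  // -->
  //   %r = call i32 @llvm.amdgcn.mul.u24(i32 %a, i32 %b)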
  if (ST->hasMulU24() && isU24(LHS, Size) && isU24(RHS, Size)) {
    IntrID = Intrinsic::amdgcn_mul_u24;
  } else if (ST->hasMulI24() && isI24(LHS, Size) && isI24(RHS, Size)) {
    IntrID = Intrinsic::amdgcn_mul_i24;
  } else
    return false;

  SmallVector<Value *, 4> LHSVals;
  SmallVector<Value *, 4> RHSVals;
  SmallVector<Value *, 4> ResultVals;
  extractValues(Builder, LHSVals, LHS);
  extractValues(Builder, RHSVals, RHS);

  IntegerType *I32Ty = Builder.getInt32Ty();
  FunctionCallee Intrin = Intrinsic::getDeclaration(Mod, IntrID);
  for (int I = 0, E = LHSVals.size(); I != E; ++I) {
    Value *LHS, *RHS;
    if (IntrID == Intrinsic::amdgcn_mul_u24) {
      LHS = Builder.CreateZExtOrTrunc(LHSVals[I], I32Ty);
      RHS = Builder.CreateZExtOrTrunc(RHSVals[I], I32Ty);
    } else {
      LHS = Builder.CreateSExtOrTrunc(LHSVals[I], I32Ty);
      RHS = Builder.CreateSExtOrTrunc(RHSVals[I], I32Ty);
    }

    Value *Result = Builder.CreateCall(Intrin, {LHS, RHS});

    if (IntrID == Intrinsic::amdgcn_mul_u24) {
      ResultVals.push_back(Builder.CreateZExtOrTrunc(Result,
                                                     LHSVals[I]->getType()));
    } else {
      ResultVals.push_back(Builder.CreateSExtOrTrunc(Result,
                                                     LHSVals[I]->getType()));
    }
  }

  Value *NewVal = insertValues(Builder, Ty, ResultVals);
  NewVal->takeName(&I);
  I.replaceAllUsesWith(NewVal);
  I.eraseFromParent();

  return true;
}

// Find a select instruction, which may have been cast. This is mostly to deal
// with cases where i16 selects were promoted here to i32.
static SelectInst *findSelectThroughCast(Value *V, CastInst *&Cast) {
  Cast = nullptr;
  if (SelectInst *Sel = dyn_cast<SelectInst>(V))
    return Sel;

  if ((Cast = dyn_cast<CastInst>(V))) {
    if (SelectInst *Sel = dyn_cast<SelectInst>(Cast->getOperand(0)))
      return Sel;
  }

  return nullptr;
}
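
// A sketch of the fold (value names are hypothetical): a binary operator with
// one constant operand, whose other operand is a single-use select between
// constants, becomes a select between the folded constants:
//   %s = select i1 %c, i32 4, i32 8
//   %r = udiv i32 %s, 2
// -->
//   %r = select i1 %c, i32 2, i32 4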

bool AMDGPUCodeGenPrepare::foldBinOpIntoSelect(BinaryOperator &BO) const {
  // Don't do this unless the old select is going away. We want to eliminate the
  // binary operator, not replace a binop with a select.
  int SelOpNo = 0;

  CastInst *CastOp;

  // TODO: Should probably try to handle some cases with multiple
  // users. Duplicating the select may be profitable for division.
  SelectInst *Sel = findSelectThroughCast(BO.getOperand(0), CastOp);
  if (!Sel || !Sel->hasOneUse()) {
    SelOpNo = 1;
    Sel = findSelectThroughCast(BO.getOperand(1), CastOp);
  }

  if (!Sel || !Sel->hasOneUse())
    return false;

  Constant *CT = dyn_cast<Constant>(Sel->getTrueValue());
  Constant *CF = dyn_cast<Constant>(Sel->getFalseValue());
  Constant *CBO = dyn_cast<Constant>(BO.getOperand(SelOpNo ^ 1));
  if (!CBO || !CT || !CF)
    return false;

  if (CastOp) {
    if (!CastOp->hasOneUse())
      return false;
    CT = ConstantFoldCastOperand(CastOp->getOpcode(), CT, BO.getType(), *DL);
    CF = ConstantFoldCastOperand(CastOp->getOpcode(), CF, BO.getType(), *DL);
  }

  // TODO: Handle special 0/-1 cases DAG combine does, although we only really
  // need to handle divisions here.
  Constant *FoldedT = SelOpNo ?
    ConstantFoldBinaryOpOperands(BO.getOpcode(), CBO, CT, *DL) :
    ConstantFoldBinaryOpOperands(BO.getOpcode(), CT, CBO, *DL);
  if (isa<ConstantExpr>(FoldedT))
    return false;

  Constant *FoldedF = SelOpNo ?
    ConstantFoldBinaryOpOperands(BO.getOpcode(), CBO, CF, *DL) :
    ConstantFoldBinaryOpOperands(BO.getOpcode(), CF, CBO, *DL);
  if (isa<ConstantExpr>(FoldedF))
    return false;

  IRBuilder<> Builder(&BO);
  Builder.SetCurrentDebugLocation(BO.getDebugLoc());
  if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(&BO))
    Builder.setFastMathFlags(FPOp->getFastMathFlags());

  Value *NewSelect = Builder.CreateSelect(Sel->getCondition(),
                                          FoldedT, FoldedF);
  NewSelect->takeName(&BO);
  BO.replaceAllUsesWith(NewSelect);
  BO.eraseFromParent();
  if (CastOp)
    CastOp->eraseFromParent();
  Sel->eraseFromParent();
  return true;
}

// Optimize fdiv with rcp:
//
// 1/x -> rcp(x) when rcp is sufficiently accurate or inaccurate rcp is
//               allowed with unsafe-fp-math or afn.
//
// a/b -> a*rcp(b) when inaccurate rcp is allowed with unsafe-fp-math or afn.
static Value *optimizeWithRcp(Value *Num, Value *Den, bool AllowInaccurateRcp,
                              bool RcpIsAccurate, IRBuilder<> Builder,
                              Module *Mod) {
  if (!AllowInaccurateRcp && !RcpIsAccurate)
    return nullptr;

  Type *Ty = Den->getType();
  if (const ConstantFP *CLHS = dyn_cast<ConstantFP>(Num)) {
    if (AllowInaccurateRcp || RcpIsAccurate) {
      if (CLHS->isExactlyValue(1.0)) {
        Function *Decl = Intrinsic::getDeclaration(
          Mod, Intrinsic::amdgcn_rcp, Ty);

        // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
        // the CI documentation they have a worst-case error of 1 ulp.
        // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to
        // use them as long as we aren't trying to use denormals.
        //
        // v_rcp_f16 and v_rsq_f16 DO support denormals.

        // NOTE: v_sqrt and v_rcp will be combined to v_rsq later. So we don't
        //       insert an rsq intrinsic here.

        // 1.0 / x -> rcp(x)
        return Builder.CreateCall(Decl, { Den });
      }

      // Same as for 1.0, but expand the sign out of the constant.
      if (CLHS->isExactlyValue(-1.0)) {
        Function *Decl = Intrinsic::getDeclaration(
          Mod, Intrinsic::amdgcn_rcp, Ty);

        // -1.0 / x -> rcp (fneg x)
        Value *FNeg = Builder.CreateFNeg(Den);
        return Builder.CreateCall(Decl, { FNeg });
      }
    }
  }

  if (AllowInaccurateRcp) {
    Function *Decl = Intrinsic::getDeclaration(
      Mod, Intrinsic::amdgcn_rcp, Ty);

    // Turn into multiply by the reciprocal.
    // x / y -> x * (1.0 / y)
    Value *Recip = Builder.CreateCall(Decl, { Den });
    return Builder.CreateFMul(Num, Recip);
  }
  return nullptr;
}

// Optimize with fdiv.fast:
//
// a/b -> fdiv.fast(a, b) when !fpmath >= 2.5ulp with denormals flushed.
//
// 1/x -> fdiv.fast(1,x) when !fpmath >= 2.5ulp.
//
// NOTE: optimizeWithRcp should be tried first because rcp is the preference.
static Value *optimizeWithFDivFast(Value *Num, Value *Den, float ReqdAccuracy,
                                   bool HasDenormals, IRBuilder<> Builder,
                                   Module *Mod) {
  // fdiv.fast can achieve 2.5 ULP accuracy.
  if (ReqdAccuracy < 2.5f)
    return nullptr;

  // Only have fdiv.fast for f32.
  Type *Ty = Den->getType();
  if (!Ty->isFloatTy())
    return nullptr;

  bool NumIsOne = false;
  if (const ConstantFP *CNum = dyn_cast<ConstantFP>(Num)) {
    if (CNum->isExactlyValue(+1.0) || CNum->isExactlyValue(-1.0))
      NumIsOne = true;
  }

  // fdiv.fast does not support denormals, but 1.0/x is always fine to use.
  if (HasDenormals && !NumIsOne)
    return nullptr;

  Function *Decl = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_fdiv_fast);
  return Builder.CreateCall(Decl, { Num, Den });
}
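
// For example (a sketch; value names are hypothetical), with 'afn' on the
// instruction:
//   %d = fdiv afn float %x, %y
// may become
//   %r = call float @llvm.amdgcn.rcp.f32(float %y)
//   %d = fmul afn float %x, %r
// and, when !fpmath allows 2.5 ulp and f32 denormals are flushed:
//   %d = call float @llvm.amdgcn.fdiv.fast(float %x, float %y)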

// Optimization is performed based on fpmath, fast math flags as well as
// denormals to optimize fdiv with either rcp or fdiv.fast.
//
// With rcp:
//   1/x -> rcp(x) when rcp is sufficiently accurate or inaccurate rcp is
//          allowed with unsafe-fp-math or afn.
//
//   a/b -> a*rcp(b) when inaccurate rcp is allowed with unsafe-fp-math or afn.
//
// With fdiv.fast:
//   a/b -> fdiv.fast(a, b) when !fpmath >= 2.5ulp with denormals flushed.
//
//   1/x -> fdiv.fast(1,x) when !fpmath >= 2.5ulp.
//
// NOTE: rcp is the preference in cases that both are legal.
bool AMDGPUCodeGenPrepare::visitFDiv(BinaryOperator &FDiv) {
  Type *Ty = FDiv.getType()->getScalarType();

  // No intrinsic for fdiv16 if target does not support f16.
  if (Ty->isHalfTy() && !ST->has16BitInsts())
    return false;

  const FPMathOperator *FPOp = cast<const FPMathOperator>(&FDiv);
  const float ReqdAccuracy = FPOp->getFPAccuracy();

  // Inaccurate rcp is allowed with unsafe-fp-math or afn.
  FastMathFlags FMF = FPOp->getFastMathFlags();
  const bool AllowInaccurateRcp = HasUnsafeFPMath || FMF.approxFunc();

  // rcp_f16 is accurate for !fpmath >= 1.0ulp.
  // rcp_f32 is accurate for !fpmath >= 1.0ulp and denormals are flushed.
  // rcp_f64 is never accurate.
  const bool RcpIsAccurate = (Ty->isHalfTy() && ReqdAccuracy >= 1.0f) ||
            (Ty->isFloatTy() && !HasFP32Denormals && ReqdAccuracy >= 1.0f);

  IRBuilder<> Builder(FDiv.getParent(), std::next(FDiv.getIterator()));
  Builder.setFastMathFlags(FMF);
  Builder.SetCurrentDebugLocation(FDiv.getDebugLoc());

  Value *Num = FDiv.getOperand(0);
  Value *Den = FDiv.getOperand(1);

  Value *NewFDiv = nullptr;
  if (VectorType *VT = dyn_cast<VectorType>(FDiv.getType())) {
    NewFDiv = UndefValue::get(VT);

    // FIXME: Doesn't do the right thing for cases where the vector is partially
    // constant. This works when the scalarizer pass is run first.
    for (unsigned I = 0, E = VT->getNumElements(); I != E; ++I) {
      Value *NumEltI = Builder.CreateExtractElement(Num, I);
      Value *DenEltI = Builder.CreateExtractElement(Den, I);
      // Try rcp first.
      Value *NewElt = optimizeWithRcp(NumEltI, DenEltI, AllowInaccurateRcp,
                                      RcpIsAccurate, Builder, Mod);
      if (!NewElt) // Try fdiv.fast.
        NewElt = optimizeWithFDivFast(NumEltI, DenEltI, ReqdAccuracy,
                                      HasFP32Denormals, Builder, Mod);
      if (!NewElt) // Keep the original.
        NewElt = Builder.CreateFDiv(NumEltI, DenEltI);

      NewFDiv = Builder.CreateInsertElement(NewFDiv, NewElt, I);
    }
  } else { // Scalar FDiv.
    // Try rcp first.
    NewFDiv = optimizeWithRcp(Num, Den, AllowInaccurateRcp, RcpIsAccurate,
                              Builder, Mod);
    if (!NewFDiv) { // Try fdiv.fast.
      NewFDiv = optimizeWithFDivFast(Num, Den, ReqdAccuracy, HasFP32Denormals,
                                     Builder, Mod);
    }
  }

  if (NewFDiv) {
    FDiv.replaceAllUsesWith(NewFDiv);
    NewFDiv->takeName(&FDiv);
    FDiv.eraseFromParent();
  }

  return !!NewFDiv;
}

static bool hasUnsafeFPMath(const Function &F) {
  Attribute Attr = F.getFnAttribute("unsafe-fp-math");
  return Attr.getValueAsString() == "true";
}

static std::pair<Value*, Value*> getMul64(IRBuilder<> &Builder,
                                          Value *LHS, Value *RHS) {
  Type *I32Ty = Builder.getInt32Ty();
  Type *I64Ty = Builder.getInt64Ty();

  Value *LHS_EXT64 = Builder.CreateZExt(LHS, I64Ty);
  Value *RHS_EXT64 = Builder.CreateZExt(RHS, I64Ty);
  Value *MUL64 = Builder.CreateMul(LHS_EXT64, RHS_EXT64);
  Value *Lo = Builder.CreateTrunc(MUL64, I32Ty);
  Value *Hi = Builder.CreateLShr(MUL64, Builder.getInt64(32));
  Hi = Builder.CreateTrunc(Hi, I32Ty);
  return std::make_pair(Lo, Hi);
}

static Value *getMulHu(IRBuilder<> &Builder, Value *LHS, Value *RHS) {
  return getMul64(Builder, LHS, RHS).second;
}
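
// getMul64 computes the full 64-bit product of two i32 values and splits it
// into low and high halves; a sketch of the emitted IR (value names are
// hypothetical):
//   %a64 = zext i32 %a to i64
//   %b64 = zext i32 %b to i64
//   %m   = mul i64 %a64, %b64
//   %lo  = trunc i64 %m to i32
//   %sh  = lshr i64 %m, 32
//   %hi  = trunc i64 %sh to i32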

/// Figure out how many bits are really needed for this division. \p AtLeast is
/// an optimization hint to bypass the second ComputeNumSignBits call if the
/// first one is insufficient. Returns -1 on failure.
int AMDGPUCodeGenPrepare::getDivNumBits(BinaryOperator &I,
                                        Value *Num, Value *Den,
                                        unsigned AtLeast, bool IsSigned) const {
  const DataLayout &DL = Mod->getDataLayout();
  unsigned LHSSignBits = ComputeNumSignBits(Num, DL, 0, AC, &I);
  if (LHSSignBits < AtLeast)
    return -1;

  unsigned RHSSignBits = ComputeNumSignBits(Den, DL, 0, AC, &I);
  if (RHSSignBits < AtLeast)
    return -1;

  unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
  unsigned DivBits = Num->getType()->getScalarSizeInBits() - SignBits;
  if (IsSigned)
    ++DivBits;
  return DivBits;
}

// The fractional part of a float is enough to accurately represent up to
// a 24-bit signed integer.
Value *AMDGPUCodeGenPrepare::expandDivRem24(IRBuilder<> &Builder,
                                            BinaryOperator &I,
                                            Value *Num, Value *Den,
                                            bool IsDiv, bool IsSigned) const {
  int DivBits = getDivNumBits(I, Num, Den, 9, IsSigned);
  if (DivBits == -1)
    return nullptr;
  return expandDivRem24Impl(Builder, I, Num, Den, DivBits, IsDiv, IsSigned);
}

Value *AMDGPUCodeGenPrepare::expandDivRem24Impl(IRBuilder<> &Builder,
                                                BinaryOperator &I,
                                                Value *Num, Value *Den,
                                                unsigned DivBits,
                                                bool IsDiv, bool IsSigned) const {
  Type *I32Ty = Builder.getInt32Ty();
  Num = Builder.CreateTrunc(Num, I32Ty);
  Den = Builder.CreateTrunc(Den, I32Ty);

  Type *F32Ty = Builder.getFloatTy();
  ConstantInt *One = Builder.getInt32(1);
  Value *JQ = One;

  if (IsSigned) {
    // char|short jq = ia ^ ib;
    JQ = Builder.CreateXor(Num, Den);

    // jq = jq >> (bitsize - 2)
    JQ = Builder.CreateAShr(JQ, Builder.getInt32(30));

    // jq = jq | 0x1
    JQ = Builder.CreateOr(JQ, One);
  }

  // int ia = (int)LHS;
  Value *IA = Num;

  // int ib = (int)RHS;
  Value *IB = Den;

  // float fa = (float)ia;
  Value *FA = IsSigned ? Builder.CreateSIToFP(IA, F32Ty)
                       : Builder.CreateUIToFP(IA, F32Ty);

  // float fb = (float)ib;
  Value *FB = IsSigned ? Builder.CreateSIToFP(IB, F32Ty)
                       : Builder.CreateUIToFP(IB, F32Ty);

  Function *RcpDecl = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_rcp,
                                                Builder.getFloatTy());
  Value *RCP = Builder.CreateCall(RcpDecl, { FB });
  Value *FQM = Builder.CreateFMul(FA, RCP);

  // fq = trunc(fqm);
  CallInst *FQ = Builder.CreateUnaryIntrinsic(Intrinsic::trunc, FQM);
  FQ->copyFastMathFlags(Builder.getFastMathFlags());

  // float fqneg = -fq;
  Value *FQNeg = Builder.CreateFNeg(FQ);

  // float fr = mad(fqneg, fb, fa);
  Value *FR = Builder.CreateIntrinsic(Intrinsic::amdgcn_fmad_ftz,
                                      {FQNeg->getType()}, {FQNeg, FB, FA}, FQ);

  // int iq = (int)fq;
  Value *IQ = IsSigned ? Builder.CreateFPToSI(FQ, I32Ty)
                       : Builder.CreateFPToUI(FQ, I32Ty);

  // fr = fabs(fr);
  FR = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, FR, FQ);

  // fb = fabs(fb);
  FB = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, FB, FQ);

  // int cv = fr >= fb;
  Value *CV = Builder.CreateFCmpOGE(FR, FB);

  // jq = (cv ? jq : 0);
  JQ = Builder.CreateSelect(CV, JQ, Builder.getInt32(0));

  // dst = iq + jq;
  Value *Div = Builder.CreateAdd(IQ, JQ);

  Value *Res = Div;
  if (!IsDiv) {
    // Rem needs compensation; it's easier to recompute it.
    Value *Rem = Builder.CreateMul(Div, Den);
    Res = Builder.CreateSub(Num, Rem);
  }

  if (DivBits != 0 && DivBits < 32) {
    // Extend in register from the number of bits this divide really is.
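    // For example (a sketch), a 7-bit unsigned divide masks with
    // (1 << 7) - 1 = 0x7f, while a signed one shifts left and then arithmetic
    // right by 32 - 7 = 25 to sign-extend from bit 6.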
95567aa18f1SStanislav Mekhanoshin     if (IsSigned) {
95634d9a16eSMatt Arsenault       int InRegBits = 32 - DivBits;
95734d9a16eSMatt Arsenault 
95834d9a16eSMatt Arsenault       Res = Builder.CreateShl(Res, InRegBits);
95934d9a16eSMatt Arsenault       Res = Builder.CreateAShr(Res, InRegBits);
96067aa18f1SStanislav Mekhanoshin     } else {
96134d9a16eSMatt Arsenault       ConstantInt *TruncMask
96234d9a16eSMatt Arsenault         = Builder.getInt32((UINT64_C(1) << DivBits) - 1);
96367aa18f1SStanislav Mekhanoshin       Res = Builder.CreateAnd(Res, TruncMask);
96467aa18f1SStanislav Mekhanoshin     }
96534d9a16eSMatt Arsenault   }
96667aa18f1SStanislav Mekhanoshin 
96767aa18f1SStanislav Mekhanoshin   return Res;
96867aa18f1SStanislav Mekhanoshin }
96967aa18f1SStanislav Mekhanoshin 
970b30e1223SMatt Arsenault // Try to recognize special cases for which the DAG will emit better
971b30e1223SMatt Arsenault // expansions than the general expansion we do here.
972b30e1223SMatt Arsenault 
973b30e1223SMatt Arsenault // TODO: It would be better to just directly handle those optimizations here.
974b30e1223SMatt Arsenault bool AMDGPUCodeGenPrepare::divHasSpecialOptimization(
975b30e1223SMatt Arsenault     BinaryOperator &I, Value *Num, Value *Den) const {
976b30e1223SMatt Arsenault   if (Constant *C = dyn_cast<Constant>(Den)) {
977b30e1223SMatt Arsenault     // Arbitrary constants get a better expansion as long as a wider mulhi is
978b30e1223SMatt Arsenault     // legal.
979b30e1223SMatt Arsenault     if (C->getType()->getScalarSizeInBits() <= 32)
980b30e1223SMatt Arsenault       return true;
981b30e1223SMatt Arsenault 
982b30e1223SMatt Arsenault     // TODO: The SDiv case checks for not-exact for some reason.
983b30e1223SMatt Arsenault 
984b30e1223SMatt Arsenault     // If there's no wider mulhi, there's only a better expansion for powers of
985b30e1223SMatt Arsenault     // two.
986b30e1223SMatt Arsenault     // TODO: Should really know for each vector element.
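// Illustrative example (added commentary, assumption): for a power-of-two
// denominator the DAG emits a plain shift rather than the full expansion,
// e.g.
//   %r = udiv i64 %x, 16   -->   %r = lshr i64 %x, 4
// so returning true here leaves the instruction intact for the DAG.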
987b30e1223SMatt Arsenault     if (isKnownToBeAPowerOfTwo(C, *DL, true, 0, AC, &I, DT))
988b30e1223SMatt Arsenault       return true;
989b30e1223SMatt Arsenault 
990b30e1223SMatt Arsenault     return false;
991b30e1223SMatt Arsenault   }
992b30e1223SMatt Arsenault 
993b30e1223SMatt Arsenault   if (BinaryOperator *BinOpDen = dyn_cast<BinaryOperator>(Den)) {
994b30e1223SMatt Arsenault     // fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) iff c is power of 2
995b30e1223SMatt Arsenault     if (BinOpDen->getOpcode() == Instruction::Shl &&
996b30e1223SMatt Arsenault         isa<Constant>(BinOpDen->getOperand(0)) &&
997b30e1223SMatt Arsenault         isKnownToBeAPowerOfTwo(BinOpDen->getOperand(0), *DL, true,
998b30e1223SMatt Arsenault                                0, AC, &I, DT)) {
999b30e1223SMatt Arsenault       return true;
1000b30e1223SMatt Arsenault     }
1001b30e1223SMatt Arsenault   }
1002b30e1223SMatt Arsenault 
1003b30e1223SMatt Arsenault   return false;
1004b30e1223SMatt Arsenault }
1005b30e1223SMatt Arsenault 
100667aa18f1SStanislav Mekhanoshin Value *AMDGPUCodeGenPrepare::expandDivRem32(IRBuilder<> &Builder,
10077e7268acSStanislav Mekhanoshin                                             BinaryOperator &I,
100867aa18f1SStanislav Mekhanoshin                                             Value *Num, Value *Den) const {
10097e7268acSStanislav Mekhanoshin   Instruction::BinaryOps Opc = I.getOpcode();
101067aa18f1SStanislav Mekhanoshin   assert(Opc == Instruction::URem || Opc == Instruction::UDiv ||
101167aa18f1SStanislav Mekhanoshin          Opc == Instruction::SRem || Opc == Instruction::SDiv);
101267aa18f1SStanislav Mekhanoshin 
101367aa18f1SStanislav Mekhanoshin   FastMathFlags FMF;
101467aa18f1SStanislav Mekhanoshin   FMF.setFast();
101567aa18f1SStanislav Mekhanoshin   Builder.setFastMathFlags(FMF);
101667aa18f1SStanislav Mekhanoshin 
1017b30e1223SMatt Arsenault   if (divHasSpecialOptimization(I, Num, Den))
1018b30e1223SMatt Arsenault     return nullptr; // Keep it for later optimization.
101967aa18f1SStanislav Mekhanoshin 
102067aa18f1SStanislav Mekhanoshin   bool IsDiv = Opc == Instruction::UDiv || Opc == Instruction::SDiv;
102167aa18f1SStanislav Mekhanoshin   bool IsSigned = Opc == Instruction::SRem || Opc == Instruction::SDiv;
102267aa18f1SStanislav Mekhanoshin 
102367aa18f1SStanislav Mekhanoshin   Type *Ty = Num->getType();
102467aa18f1SStanislav Mekhanoshin   Type *I32Ty = Builder.getInt32Ty();
102567aa18f1SStanislav Mekhanoshin   Type *F32Ty = Builder.getFloatTy();
102667aa18f1SStanislav Mekhanoshin 
102767aa18f1SStanislav Mekhanoshin   if (Ty->getScalarSizeInBits() < 32) {
102867aa18f1SStanislav Mekhanoshin     if (IsSigned) {
102967aa18f1SStanislav Mekhanoshin       Num = Builder.CreateSExt(Num, I32Ty);
103067aa18f1SStanislav Mekhanoshin       Den = Builder.CreateSExt(Den, I32Ty);
103167aa18f1SStanislav Mekhanoshin     } else {
103267aa18f1SStanislav Mekhanoshin       Num = Builder.CreateZExt(Num, I32Ty);
103367aa18f1SStanislav Mekhanoshin       Den = Builder.CreateZExt(Den, I32Ty);
103467aa18f1SStanislav Mekhanoshin     }
103567aa18f1SStanislav Mekhanoshin   }
103667aa18f1SStanislav Mekhanoshin 
10377e7268acSStanislav Mekhanoshin   if (Value *Res = expandDivRem24(Builder, I, Num, Den, IsDiv, IsSigned)) {
103834d9a16eSMatt Arsenault     return IsSigned ? Builder.CreateSExtOrTrunc(Res, Ty) :
103934d9a16eSMatt Arsenault                       Builder.CreateZExtOrTrunc(Res, Ty);
104067aa18f1SStanislav Mekhanoshin   }
104167aa18f1SStanislav Mekhanoshin 
104267aa18f1SStanislav Mekhanoshin   ConstantInt *Zero = Builder.getInt32(0);
104367aa18f1SStanislav Mekhanoshin   ConstantInt *One = Builder.getInt32(1);
104467aa18f1SStanislav Mekhanoshin 
104567aa18f1SStanislav Mekhanoshin   Value *Sign = nullptr;
104667aa18f1SStanislav Mekhanoshin   if (IsSigned) {
104767aa18f1SStanislav Mekhanoshin     ConstantInt *K31 = Builder.getInt32(31);
104867aa18f1SStanislav Mekhanoshin     Value *LHSign = Builder.CreateAShr(Num, K31);
104967aa18f1SStanislav Mekhanoshin     Value *RHSign = Builder.CreateAShr(Den, K31);
105067aa18f1SStanislav Mekhanoshin     // Remainder sign is the same as LHS
105167aa18f1SStanislav Mekhanoshin     Sign = IsDiv ? Builder.CreateXor(LHSign, RHSign) : LHSign;
105267aa18f1SStanislav Mekhanoshin 
105367aa18f1SStanislav Mekhanoshin     Num = Builder.CreateAdd(Num, LHSign);
105467aa18f1SStanislav Mekhanoshin     Den = Builder.CreateAdd(Den, RHSign);
105567aa18f1SStanislav Mekhanoshin 
105667aa18f1SStanislav Mekhanoshin     Num = Builder.CreateXor(Num, LHSign);
105767aa18f1SStanislav Mekhanoshin     Den = Builder.CreateXor(Den, RHSign);
105867aa18f1SStanislav Mekhanoshin   }
105967aa18f1SStanislav Mekhanoshin 
106067aa18f1SStanislav Mekhanoshin   // RCP = URECIP(Den) = 2^32 / Den + e
106167aa18f1SStanislav Mekhanoshin   // e is rounding error.
106267aa18f1SStanislav Mekhanoshin   Value *DEN_F32 = Builder.CreateUIToFP(Den, F32Ty);
106392c62582SMatt Arsenault 
106492c62582SMatt Arsenault   Function *RcpDecl = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_rcp,
106592c62582SMatt Arsenault                                                 Builder.getFloatTy());
106692c62582SMatt Arsenault   Value *RCP_F32 = Builder.CreateCall(RcpDecl, { DEN_F32 });
106767aa18f1SStanislav Mekhanoshin   Constant *UINT_MAX_PLUS_1 = ConstantFP::get(F32Ty, BitsToFloat(0x4f800000));
106867aa18f1SStanislav Mekhanoshin   Value *RCP_SCALE = Builder.CreateFMul(RCP_F32, UINT_MAX_PLUS_1);
106967aa18f1SStanislav Mekhanoshin   Value *RCP = Builder.CreateFPToUI(RCP_SCALE, I32Ty);
107067aa18f1SStanislav Mekhanoshin 
107167aa18f1SStanislav Mekhanoshin   // RCP_LO, RCP_HI = mul(RCP, Den)
107267aa18f1SStanislav Mekhanoshin   Value *RCP_LO, *RCP_HI;
107367aa18f1SStanislav Mekhanoshin   std::tie(RCP_LO, RCP_HI) = getMul64(Builder, RCP, Den);
107467aa18f1SStanislav Mekhanoshin 
107567aa18f1SStanislav Mekhanoshin   // NEG_RCP_LO = -RCP_LO
107667aa18f1SStanislav Mekhanoshin   Value *NEG_RCP_LO = Builder.CreateNeg(RCP_LO);
107767aa18f1SStanislav Mekhanoshin 
107867aa18f1SStanislav Mekhanoshin   // ABS_RCP_LO = (RCP_HI == 0 ? NEG_RCP_LO : RCP_LO)
107967aa18f1SStanislav Mekhanoshin   Value *RCP_HI_0_CC = Builder.CreateICmpEQ(RCP_HI, Zero);
108067aa18f1SStanislav Mekhanoshin   Value *ABS_RCP_LO = Builder.CreateSelect(RCP_HI_0_CC, NEG_RCP_LO, RCP_LO);
108167aa18f1SStanislav Mekhanoshin 
108267aa18f1SStanislav Mekhanoshin   // Calculate the rounding error from the URECIP instruction
108367aa18f1SStanislav Mekhanoshin   // E = mulhu(ABS_RCP_LO, RCP)
108467aa18f1SStanislav Mekhanoshin   Value *E = getMulHu(Builder, ABS_RCP_LO, RCP);
108567aa18f1SStanislav Mekhanoshin 
108667aa18f1SStanislav Mekhanoshin   // RCP_A_E = RCP + E
108767aa18f1SStanislav Mekhanoshin   Value *RCP_A_E = Builder.CreateAdd(RCP, E);
108867aa18f1SStanislav Mekhanoshin 
108967aa18f1SStanislav Mekhanoshin   // RCP_S_E = RCP - E
109067aa18f1SStanislav Mekhanoshin   Value *RCP_S_E = Builder.CreateSub(RCP, E);
109167aa18f1SStanislav Mekhanoshin 
109267aa18f1SStanislav Mekhanoshin   // Tmp0 = (RCP_HI == 0 ? RCP_A_E : RCP_S_E)
109367aa18f1SStanislav Mekhanoshin   Value *Tmp0 = Builder.CreateSelect(RCP_HI_0_CC, RCP_A_E, RCP_S_E);
109467aa18f1SStanislav Mekhanoshin 
109567aa18f1SStanislav Mekhanoshin   // Quotient = mulhu(Tmp0, Num)
109667aa18f1SStanislav Mekhanoshin   Value *Quotient = getMulHu(Builder, Tmp0, Num);
109767aa18f1SStanislav Mekhanoshin 
109867aa18f1SStanislav Mekhanoshin   // Num_S_Remainder = Quotient * Den
109967aa18f1SStanislav Mekhanoshin   Value *Num_S_Remainder = Builder.CreateMul(Quotient, Den);
110067aa18f1SStanislav Mekhanoshin 
110167aa18f1SStanislav Mekhanoshin   // Remainder = Num - Num_S_Remainder
110267aa18f1SStanislav Mekhanoshin   Value *Remainder = Builder.CreateSub(Num, Num_S_Remainder);
110367aa18f1SStanislav Mekhanoshin 
11046d4ebadaSMatt Arsenault   // Remainder_GE_Den = Remainder >= Den
11056d4ebadaSMatt Arsenault   Value *Remainder_GE_Den = Builder.CreateICmpUGE(Remainder, Den);
110667aa18f1SStanislav Mekhanoshin 
11076d4ebadaSMatt Arsenault   // Remainder_GE_Zero = Num >= Num_S_Remainder
11086d4ebadaSMatt Arsenault   Value *Remainder_GE_Zero = Builder.CreateICmpUGE(Num, Num_S_Remainder);
110967aa18f1SStanislav Mekhanoshin 
111067aa18f1SStanislav Mekhanoshin   // Tmp1 = Remainder_GE_Den & Remainder_GE_Zero
111167aa18f1SStanislav Mekhanoshin   Value *Tmp1 = Builder.CreateAnd(Remainder_GE_Den, Remainder_GE_Zero);
111267aa18f1SStanislav Mekhanoshin 
111367aa18f1SStanislav Mekhanoshin   Value *Res;
111467aa18f1SStanislav Mekhanoshin   if (IsDiv) {
111567aa18f1SStanislav Mekhanoshin     // Quotient_A_One = Quotient + 1
111667aa18f1SStanislav Mekhanoshin     Value *Quotient_A_One = Builder.CreateAdd(Quotient, One);
111767aa18f1SStanislav Mekhanoshin 
111867aa18f1SStanislav Mekhanoshin     // Quotient_S_One = Quotient - 1
111967aa18f1SStanislav Mekhanoshin     Value *Quotient_S_One = Builder.CreateSub(Quotient, One);
112067aa18f1SStanislav Mekhanoshin 
11216d4ebadaSMatt Arsenault     // Div = (Tmp1 ? Quotient_A_One : Quotient)
11226d4ebadaSMatt Arsenault     Value *Div = Builder.CreateSelect(Tmp1, Quotient_A_One, Quotient);
112367aa18f1SStanislav Mekhanoshin 
11246d4ebadaSMatt Arsenault     // Div = (Remainder_GE_Zero ? Div : Quotient_S_One)
11256d4ebadaSMatt Arsenault     Res = Builder.CreateSelect(Remainder_GE_Zero, Div, Quotient_S_One);
112667aa18f1SStanislav Mekhanoshin   } else {
112767aa18f1SStanislav Mekhanoshin     // Remainder_S_Den = Remainder - Den
112867aa18f1SStanislav Mekhanoshin     Value *Remainder_S_Den = Builder.CreateSub(Remainder, Den);
112967aa18f1SStanislav Mekhanoshin 
113067aa18f1SStanislav Mekhanoshin     // Remainder_A_Den = Remainder + Den
113167aa18f1SStanislav Mekhanoshin     Value *Remainder_A_Den = Builder.CreateAdd(Remainder, Den);
113267aa18f1SStanislav Mekhanoshin 
11336d4ebadaSMatt Arsenault     // Rem = (Tmp1 ? Remainder_S_Den : Remainder)
11346d4ebadaSMatt Arsenault     Value *Rem = Builder.CreateSelect(Tmp1, Remainder_S_Den, Remainder);
113567aa18f1SStanislav Mekhanoshin 
11366d4ebadaSMatt Arsenault     // Rem = (Remainder_GE_Zero ? Rem : Remainder_A_Den)
11376d4ebadaSMatt Arsenault     Res = Builder.CreateSelect(Remainder_GE_Zero, Rem, Remainder_A_Den);
113867aa18f1SStanislav Mekhanoshin   }
113967aa18f1SStanislav Mekhanoshin 
114067aa18f1SStanislav Mekhanoshin   if (IsSigned) {
114167aa18f1SStanislav Mekhanoshin     Res = Builder.CreateXor(Res, Sign);
114267aa18f1SStanislav Mekhanoshin     Res = Builder.CreateSub(Res, Sign);
114367aa18f1SStanislav Mekhanoshin   }
114467aa18f1SStanislav Mekhanoshin 
114567aa18f1SStanislav Mekhanoshin   Res = Builder.CreateTrunc(Res, Ty);
114667aa18f1SStanislav Mekhanoshin 
114767aa18f1SStanislav Mekhanoshin   return Res;
114867aa18f1SStanislav Mekhanoshin }
114967aa18f1SStanislav Mekhanoshin 
115034d9a16eSMatt Arsenault Value *AMDGPUCodeGenPrepare::shrinkDivRem64(IRBuilder<> &Builder,
115134d9a16eSMatt Arsenault                                             BinaryOperator &I,
115234d9a16eSMatt Arsenault                                             Value *Num, Value *Den) const {
115334d9a16eSMatt Arsenault   if (!ExpandDiv64InIR && divHasSpecialOptimization(I, Num, Den))
115434d9a16eSMatt Arsenault     return nullptr; // Keep it for later optimization.
115534d9a16eSMatt Arsenault 
115634d9a16eSMatt Arsenault   Instruction::BinaryOps Opc = I.getOpcode();
115734d9a16eSMatt Arsenault 
115834d9a16eSMatt Arsenault   bool IsDiv = Opc == Instruction::SDiv || Opc == Instruction::UDiv;
115934d9a16eSMatt Arsenault   bool IsSigned = Opc == Instruction::SDiv || Opc == Instruction::SRem;
116034d9a16eSMatt Arsenault 
116134d9a16eSMatt Arsenault   int NumDivBits = getDivNumBits(I, Num, Den, 32, IsSigned);
116234d9a16eSMatt Arsenault   if (NumDivBits == -1)
116334d9a16eSMatt Arsenault     return nullptr;
116434d9a16eSMatt Arsenault 
116534d9a16eSMatt Arsenault   Value *Narrowed = nullptr;
116634d9a16eSMatt Arsenault   if (NumDivBits <= 24) {
116734d9a16eSMatt Arsenault     Narrowed = expandDivRem24Impl(Builder, I, Num, Den, NumDivBits,
116834d9a16eSMatt Arsenault                                   IsDiv, IsSigned);
116934d9a16eSMatt Arsenault   } else if (NumDivBits <= 32) {
117034d9a16eSMatt Arsenault     Narrowed = expandDivRem32(Builder, I, Num, Den);
117134d9a16eSMatt Arsenault   }
117234d9a16eSMatt Arsenault 
117334d9a16eSMatt Arsenault   if (Narrowed) {
117434d9a16eSMatt Arsenault     return IsSigned ? Builder.CreateSExt(Narrowed, Num->getType()) :
117534d9a16eSMatt Arsenault                       Builder.CreateZExt(Narrowed, Num->getType());
117634d9a16eSMatt Arsenault   }
117734d9a16eSMatt Arsenault 
117834d9a16eSMatt Arsenault   return nullptr;
117934d9a16eSMatt Arsenault }
118034d9a16eSMatt Arsenault 
118134d9a16eSMatt Arsenault void AMDGPUCodeGenPrepare::expandDivRem64(BinaryOperator &I) const {
118234d9a16eSMatt Arsenault   Instruction::BinaryOps Opc = I.getOpcode();
118334d9a16eSMatt Arsenault   // Do the general expansion.
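// Note (added commentary): expandDivisionUpTo64Bits and
// expandRemainderUpTo64Bits come from llvm/Transforms/Utils/IntegerDivision.h
// and rewrite the instruction as an iterative shift-and-subtract expansion,
// which creates new basic blocks. Roughly, for udiv of n by d (a sketch,
// not the exact emitted IR):
//   uint64_t q = 0, r = 0;
//   for (int i = 63; i >= 0; --i) {
//     r = (r << 1) | ((n >> i) & 1);
//     if (r >= d) { r -= d; q |= (UINT64_C(1) << i); }
//   }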
118434d9a16eSMatt Arsenault   if (Opc == Instruction::UDiv || Opc == Instruction::SDiv) {
118534d9a16eSMatt Arsenault     expandDivisionUpTo64Bits(&I);
118634d9a16eSMatt Arsenault     return;
118734d9a16eSMatt Arsenault   }
118834d9a16eSMatt Arsenault 
118934d9a16eSMatt Arsenault   if (Opc == Instruction::URem || Opc == Instruction::SRem) {
119034d9a16eSMatt Arsenault     expandRemainderUpTo64Bits(&I);
119134d9a16eSMatt Arsenault     return;
119234d9a16eSMatt Arsenault   }
119334d9a16eSMatt Arsenault 
119434d9a16eSMatt Arsenault   llvm_unreachable("not a division");
119534d9a16eSMatt Arsenault }
119634d9a16eSMatt Arsenault 
119767aa18f1SStanislav Mekhanoshin bool AMDGPUCodeGenPrepare::visitBinaryOperator(BinaryOperator &I) {
1198bcd91778SMatt Arsenault   if (foldBinOpIntoSelect(I))
1199bcd91778SMatt Arsenault     return true;
1200bcd91778SMatt Arsenault 
1201f74fc60aSKonstantin Zhuravlyov   if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
120267aa18f1SStanislav Mekhanoshin       DA->isUniform(&I) && promoteUniformOpToI32(I))
120367aa18f1SStanislav Mekhanoshin     return true;
120467aa18f1SStanislav Mekhanoshin 
1205b3dd381aSMatt Arsenault   if (UseMul24Intrin && replaceMulWithMul24(I))
120649169a96SMatt Arsenault     return true;
120749169a96SMatt Arsenault 
120867aa18f1SStanislav Mekhanoshin   bool Changed = false;
120967aa18f1SStanislav Mekhanoshin   Instruction::BinaryOps Opc = I.getOpcode();
121067aa18f1SStanislav Mekhanoshin   Type *Ty = I.getType();
121167aa18f1SStanislav Mekhanoshin   Value *NewDiv = nullptr;
121234d9a16eSMatt Arsenault   unsigned ScalarSize = Ty->getScalarSizeInBits();
121334d9a16eSMatt Arsenault 
121434d9a16eSMatt Arsenault   SmallVector<BinaryOperator *, 8> Div64ToExpand;
121534d9a16eSMatt Arsenault 
121667aa18f1SStanislav Mekhanoshin   if ((Opc == Instruction::URem || Opc == Instruction::UDiv ||
121767aa18f1SStanislav Mekhanoshin        Opc == Instruction::SRem || Opc == Instruction::SDiv) &&
12189ec66860SMatt Arsenault       ScalarSize <= 64 &&
12199ec66860SMatt Arsenault       !DisableIDivExpand) {
122067aa18f1SStanislav Mekhanoshin     Value *Num = I.getOperand(0);
122167aa18f1SStanislav Mekhanoshin     Value *Den = I.getOperand(1);
122267aa18f1SStanislav Mekhanoshin     IRBuilder<> Builder(&I);
122367aa18f1SStanislav Mekhanoshin     Builder.SetCurrentDebugLocation(I.getDebugLoc());
122467aa18f1SStanislav Mekhanoshin 
122567aa18f1SStanislav Mekhanoshin     if (VectorType *VT = dyn_cast<VectorType>(Ty)) {
122667aa18f1SStanislav Mekhanoshin       NewDiv = UndefValue::get(VT);
122767aa18f1SStanislav Mekhanoshin 
12287e7268acSStanislav Mekhanoshin       for (unsigned N = 0, E = VT->getNumElements(); N != E; ++N) {
12297e7268acSStanislav Mekhanoshin         Value *NumEltN = Builder.CreateExtractElement(Num, N);
12307e7268acSStanislav Mekhanoshin         Value *DenEltN = Builder.CreateExtractElement(Den, N);
123134d9a16eSMatt Arsenault 
123234d9a16eSMatt Arsenault         Value *NewElt;
123334d9a16eSMatt Arsenault         if (ScalarSize <= 32) {
123434d9a16eSMatt Arsenault           NewElt = expandDivRem32(Builder, I, NumEltN, DenEltN);
123567aa18f1SStanislav Mekhanoshin           if (!NewElt)
12367e7268acSStanislav Mekhanoshin             NewElt = Builder.CreateBinOp(Opc, NumEltN, DenEltN);
123734d9a16eSMatt Arsenault         } else {
123834d9a16eSMatt Arsenault           // See if this 64-bit division can be shrunk to 32/24-bits before
123934d9a16eSMatt Arsenault           // producing the general expansion.
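// Illustrative example (added commentary, assumption): if both i64 operands
// are sign extensions of i32 values, ComputeNumSignBits reports at least 33
// sign bits, getDivNumBits(..., /*AtLeast=*/32, ...) succeeds, and something
// like
//   %d = sdiv i64 (sext i32 %a to i64), (sext i32 %b to i64)
// is done as a 32-bit expansion whose result is sign-extended back to i64.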
124034d9a16eSMatt Arsenault           NewElt = shrinkDivRem64(Builder, I, NumEltN, DenEltN);
124134d9a16eSMatt Arsenault           if (!NewElt) {
124234d9a16eSMatt Arsenault             // The general 64-bit expansion introduces control flow and doesn't
124334d9a16eSMatt Arsenault             // return the new value. Just insert a scalar copy and defer
124434d9a16eSMatt Arsenault             // expanding it.
124534d9a16eSMatt Arsenault             NewElt = Builder.CreateBinOp(Opc, NumEltN, DenEltN);
124634d9a16eSMatt Arsenault             Div64ToExpand.push_back(cast<BinaryOperator>(NewElt));
124734d9a16eSMatt Arsenault           }
124834d9a16eSMatt Arsenault         }
124934d9a16eSMatt Arsenault 
12507e7268acSStanislav Mekhanoshin         NewDiv = Builder.CreateInsertElement(NewDiv, NewElt, N);
125167aa18f1SStanislav Mekhanoshin       }
125267aa18f1SStanislav Mekhanoshin     } else {
125334d9a16eSMatt Arsenault       if (ScalarSize <= 32)
12547e7268acSStanislav Mekhanoshin         NewDiv = expandDivRem32(Builder, I, Num, Den);
125534d9a16eSMatt Arsenault       else {
125634d9a16eSMatt Arsenault         NewDiv = shrinkDivRem64(Builder, I, Num, Den);
125734d9a16eSMatt Arsenault         if (!NewDiv)
125834d9a16eSMatt Arsenault           Div64ToExpand.push_back(&I);
125934d9a16eSMatt Arsenault       }
126067aa18f1SStanislav Mekhanoshin     }
126167aa18f1SStanislav Mekhanoshin 
126267aa18f1SStanislav Mekhanoshin     if (NewDiv) {
126367aa18f1SStanislav Mekhanoshin       I.replaceAllUsesWith(NewDiv);
126467aa18f1SStanislav Mekhanoshin       I.eraseFromParent();
126567aa18f1SStanislav Mekhanoshin       Changed = true;
126667aa18f1SStanislav Mekhanoshin     }
126767aa18f1SStanislav Mekhanoshin   }
1268e14df4b2SKonstantin Zhuravlyov 
126934d9a16eSMatt Arsenault   if (ExpandDiv64InIR) {
127034d9a16eSMatt Arsenault     // TODO: We get much worse code in specially handled constant cases.
127134d9a16eSMatt Arsenault     for (BinaryOperator *Div : Div64ToExpand) {
127234d9a16eSMatt Arsenault       expandDivRem64(*Div);
127334d9a16eSMatt Arsenault       Changed = true;
127434d9a16eSMatt Arsenault     }
127534d9a16eSMatt Arsenault   }
127634d9a16eSMatt Arsenault 
1277e14df4b2SKonstantin Zhuravlyov   return Changed;
1278e14df4b2SKonstantin Zhuravlyov }
1279e14df4b2SKonstantin Zhuravlyov 
1280a126a13bSWei Ding bool AMDGPUCodeGenPrepare::visitLoadInst(LoadInst &I) {
128190083d30SMatt Arsenault   if (!WidenLoads)
128190083d30SMatt Arsenault     return false;
128390083d30SMatt Arsenault 
12840da6350dSMatt Arsenault   if ((I.getPointerAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
12850da6350dSMatt Arsenault        I.getPointerAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
1286a126a13bSWei Ding       canWidenScalarExtLoad(I)) {
1287a126a13bSWei Ding     IRBuilder<> Builder(&I);
1288a126a13bSWei Ding     Builder.SetCurrentDebugLocation(I.getDebugLoc());
1289a126a13bSWei Ding 
1290a126a13bSWei Ding     Type *I32Ty = Builder.getInt32Ty();
1291a126a13bSWei Ding     Type *PT = PointerType::get(I32Ty, I.getPointerAddressSpace());
1292a126a13bSWei Ding     Value *BitCast = Builder.CreateBitCast(I.getPointerOperand(), PT);
129314359ef1SJames Y Knight     LoadInst *WidenLoad = Builder.CreateLoad(I32Ty, BitCast);
129457e541e8SMatt Arsenault     WidenLoad->copyMetadata(I);
129557e541e8SMatt Arsenault 
129657e541e8SMatt Arsenault     // If we have range metadata, we need to convert the type, and not make
129757e541e8SMatt Arsenault     // assumptions about the high bits.
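// Illustrative example (added commentary, assumption): an i8 load annotated
// with !range !{i8 1, i8 10} that is widened to i32 only constrains the low
// byte, so the metadata is rebuilt below as !range !{i32 1, i32 0}, a
// wrapping range that keeps the nonzero lower bound while saying nothing
// about the high bits. A lower bound of 0 conveys no information once
// widened, so in that case the metadata is dropped entirely.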
129857e541e8SMatt Arsenault     if (auto *Range = WidenLoad->getMetadata(LLVMContext::MD_range)) {
129957e541e8SMatt Arsenault       ConstantInt *Lower =
130057e541e8SMatt Arsenault           mdconst::extract<ConstantInt>(Range->getOperand(0));
130157e541e8SMatt Arsenault 
130257e541e8SMatt Arsenault       if (Lower->getValue().isNullValue()) {
130357e541e8SMatt Arsenault         WidenLoad->setMetadata(LLVMContext::MD_range, nullptr);
130457e541e8SMatt Arsenault       } else {
130557e541e8SMatt Arsenault         Metadata *LowAndHigh[] = {
130657e541e8SMatt Arsenault           ConstantAsMetadata::get(ConstantInt::get(I32Ty, Lower->getValue().zext(32))),
130757e541e8SMatt Arsenault           // Don't make assumptions about the high bits.
130857e541e8SMatt Arsenault           ConstantAsMetadata::get(ConstantInt::get(I32Ty, 0))
130957e541e8SMatt Arsenault         };
131057e541e8SMatt Arsenault 
131157e541e8SMatt Arsenault         WidenLoad->setMetadata(LLVMContext::MD_range,
131257e541e8SMatt Arsenault                                MDNode::get(Mod->getContext(), LowAndHigh));
131357e541e8SMatt Arsenault       }
131457e541e8SMatt Arsenault     }
1315a126a13bSWei Ding 
1316a126a13bSWei Ding     int TySize = Mod->getDataLayout().getTypeSizeInBits(I.getType());
1317a126a13bSWei Ding     Type *IntNTy = Builder.getIntNTy(TySize);
1318a126a13bSWei Ding     Value *ValTrunc = Builder.CreateTrunc(WidenLoad, IntNTy);
1319a126a13bSWei Ding     Value *ValOrig = Builder.CreateBitCast(ValTrunc, I.getType());
1320a126a13bSWei Ding     I.replaceAllUsesWith(ValOrig);
1321a126a13bSWei Ding     I.eraseFromParent();
1322a126a13bSWei Ding     return true;
1323a126a13bSWei Ding   }
1324a126a13bSWei Ding 
1325a126a13bSWei Ding   return false;
1326a126a13bSWei Ding }
1327a126a13bSWei Ding 
1328e14df4b2SKonstantin Zhuravlyov bool AMDGPUCodeGenPrepare::visitICmpInst(ICmpInst &I) {
1329e14df4b2SKonstantin Zhuravlyov   bool Changed = false;
1330e14df4b2SKonstantin Zhuravlyov 
1331f74fc60aSKonstantin Zhuravlyov   if (ST->has16BitInsts() && needsPromotionToI32(I.getOperand(0)->getType()) &&
1332f74fc60aSKonstantin Zhuravlyov       DA->isUniform(&I))
1333f74fc60aSKonstantin Zhuravlyov     Changed |= promoteUniformOpToI32(I);
1334e14df4b2SKonstantin Zhuravlyov 
1335e14df4b2SKonstantin Zhuravlyov   return Changed;
1336e14df4b2SKonstantin Zhuravlyov }
1337e14df4b2SKonstantin Zhuravlyov 
1338e14df4b2SKonstantin Zhuravlyov bool AMDGPUCodeGenPrepare::visitSelectInst(SelectInst &I) {
1339e14df4b2SKonstantin Zhuravlyov   bool Changed = false;
1340e14df4b2SKonstantin Zhuravlyov 
1341f74fc60aSKonstantin Zhuravlyov   if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
1342f74fc60aSKonstantin Zhuravlyov       DA->isUniform(&I))
1343f74fc60aSKonstantin Zhuravlyov     Changed |= promoteUniformOpToI32(I);
1344b4eb5d50SKonstantin Zhuravlyov 
1345b4eb5d50SKonstantin Zhuravlyov   return Changed;
1346b4eb5d50SKonstantin Zhuravlyov }
1347b4eb5d50SKonstantin Zhuravlyov 
1348b4eb5d50SKonstantin Zhuravlyov bool AMDGPUCodeGenPrepare::visitIntrinsicInst(IntrinsicInst &I) {
1349b4eb5d50SKonstantin Zhuravlyov   switch (I.getIntrinsicID()) {
1350b4eb5d50SKonstantin Zhuravlyov   case Intrinsic::bitreverse:
1351b4eb5d50SKonstantin Zhuravlyov     return visitBitreverseIntrinsicInst(I);
1352b4eb5d50SKonstantin Zhuravlyov   default:
1353b4eb5d50SKonstantin Zhuravlyov     return false;
1354b4eb5d50SKonstantin Zhuravlyov   }
1355b4eb5d50SKonstantin Zhuravlyov }
1356b4eb5d50SKonstantin Zhuravlyov 
1357b4eb5d50SKonstantin Zhuravlyov bool AMDGPUCodeGenPrepare::visitBitreverseIntrinsicInst(IntrinsicInst &I) {
1358b4eb5d50SKonstantin Zhuravlyov   bool Changed = false;
1359b4eb5d50SKonstantin Zhuravlyov 
1360f74fc60aSKonstantin Zhuravlyov   if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
1361f74fc60aSKonstantin Zhuravlyov       DA->isUniform(&I))
1362f74fc60aSKonstantin Zhuravlyov     Changed |= promoteUniformBitreverseToI32(I);
1363e14df4b2SKonstantin Zhuravlyov 
1364e14df4b2SKonstantin Zhuravlyov   return Changed;
1365e14df4b2SKonstantin Zhuravlyov }
1366e14df4b2SKonstantin Zhuravlyov 
136786de486dSMatt Arsenault bool AMDGPUCodeGenPrepare::doInitialization(Module &M) {
1368a1fe17c9SMatt Arsenault   Mod = &M;
136949169a96SMatt Arsenault   DL = &Mod->getDataLayout();
137086de486dSMatt Arsenault   return false;
137186de486dSMatt Arsenault }
137286de486dSMatt Arsenault 
137386de486dSMatt Arsenault bool AMDGPUCodeGenPrepare::runOnFunction(Function &F) {
13748b61764cSFrancis Visoiu Mistrih   if (skipFunction(F))
137586de486dSMatt Arsenault     return false;
137686de486dSMatt Arsenault 
13778b61764cSFrancis Visoiu Mistrih   auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
13788b61764cSFrancis Visoiu Mistrih   if (!TPC)
13798b61764cSFrancis Visoiu Mistrih     return false;
13808b61764cSFrancis Visoiu Mistrih 
138112269ddaSMatt Arsenault   const AMDGPUTargetMachine &TM = TPC->getTM<AMDGPUTargetMachine>();
13825bfbae5cSTom Stellard   ST = &TM.getSubtarget<GCNSubtarget>(F);
13837e7268acSStanislav Mekhanoshin   AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
138435617ed4SNicolai Haehnle   DA = &getAnalysis<LegacyDivergenceAnalysis>();
1385b30e1223SMatt Arsenault 
1386b30e1223SMatt Arsenault   auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
1387b30e1223SMatt Arsenault   DT = DTWP ? &DTWP->getDomTree() : nullptr;
1388b30e1223SMatt Arsenault 
1389a1fe17c9SMatt Arsenault   HasUnsafeFPMath = hasUnsafeFPMath(F);
1390db0ed3e4SMatt Arsenault   HasFP32Denormals = ST->hasFP32Denormals(F);
139186de486dSMatt Arsenault 
1392a1fe17c9SMatt Arsenault   bool MadeChange = false;
1393a1fe17c9SMatt Arsenault 
139434d9a16eSMatt Arsenault   Function::iterator NextBB;
139534d9a16eSMatt Arsenault   for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; FI = NextBB) {
139634d9a16eSMatt Arsenault     BasicBlock *BB = &*FI;
139734d9a16eSMatt Arsenault     NextBB = std::next(FI);
139834d9a16eSMatt Arsenault 
1399a1fe17c9SMatt Arsenault     BasicBlock::iterator Next;
140034d9a16eSMatt Arsenault     for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; I = Next) {
1401a1fe17c9SMatt Arsenault       Next = std::next(I);
140234d9a16eSMatt Arsenault 
1403a1fe17c9SMatt Arsenault       MadeChange |= visit(*I);
140434d9a16eSMatt Arsenault 
140534d9a16eSMatt Arsenault       if (Next != E) { // Control flow changed
140634d9a16eSMatt Arsenault         BasicBlock *NextInstBB = Next->getParent();
140734d9a16eSMatt Arsenault         if (NextInstBB != BB) {
140834d9a16eSMatt Arsenault           BB = NextInstBB;
140934d9a16eSMatt Arsenault           E = BB->end();
141034d9a16eSMatt Arsenault           FE = F.end();
141134d9a16eSMatt Arsenault         }
141234d9a16eSMatt Arsenault       }
1413a1fe17c9SMatt Arsenault     }
1414a1fe17c9SMatt Arsenault   }
1415a1fe17c9SMatt Arsenault 
1416a1fe17c9SMatt Arsenault   return MadeChange;
141786de486dSMatt Arsenault }
141886de486dSMatt Arsenault 
14198b61764cSFrancis Visoiu Mistrih INITIALIZE_PASS_BEGIN(AMDGPUCodeGenPrepare, DEBUG_TYPE,
142086de486dSMatt Arsenault                       "AMDGPU IR optimizations", false, false)
14217e7268acSStanislav Mekhanoshin INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
142235617ed4SNicolai Haehnle INITIALIZE_PASS_DEPENDENCY(LegacyDivergenceAnalysis)
14238b61764cSFrancis Visoiu Mistrih INITIALIZE_PASS_END(AMDGPUCodeGenPrepare, DEBUG_TYPE, "AMDGPU IR optimizations",
14248b61764cSFrancis Visoiu Mistrih                     false, false)
142586de486dSMatt Arsenault 
142686de486dSMatt Arsenault char AMDGPUCodeGenPrepare::ID = 0;
142786de486dSMatt Arsenault 
14288b61764cSFrancis Visoiu Mistrih FunctionPass *llvm::createAMDGPUCodeGenPreparePass() {
14298b61764cSFrancis Visoiu Mistrih   return new AMDGPUCodeGenPrepare();
143086de486dSMatt Arsenault }
1431
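// Usage note (added commentary, based on the pass name registered above):
// the pass can be run standalone with the legacy pass manager, e.g.
//   opt -mtriple=amdgcn-- -amdgpu-codegenprepare -S < input.ll
// which is how lit tests typically exercise this file's transforms.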