//===-- AMDGPUCodeGenPrepare.cpp ------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass does misc. AMDGPU optimizations on IR before instruction
/// selection.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/DivergenceAnalysis.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include <cassert>
#include <iterator>

#define DEBUG_TYPE "amdgpu-codegenprepare"

using namespace llvm;

namespace {

class AMDGPUCodeGenPrepare : public FunctionPass,
                             public InstVisitor<AMDGPUCodeGenPrepare, bool> {
  const GCNTargetMachine *TM;
  const SISubtarget *ST = nullptr;
  DivergenceAnalysis *DA = nullptr;
  Module *Mod = nullptr;
  bool HasUnsafeFPMath = false;

  /// \returns \p T's base element bit width.
  unsigned getBaseElementBitWidth(const Type *T) const;

  /// \returns Equivalent 32 bit integer type for given type \p T. For example,
  /// if \p T is i7, then i32 is returned; if \p T is <3 x i12>, then <3 x i32>
  /// is returned.
  Type *getI32Ty(IRBuilder<> &B, const Type *T) const;

  /// \returns True if binary operation \p I is a signed binary operation,
  /// false otherwise.
  bool isSigned(const BinaryOperator &I) const;

  /// \returns True if the condition of 'select' operation \p I comes from a
  /// signed 'icmp' operation, false otherwise.
  bool isSigned(const SelectInst &I) const;

  /// \returns True if type \p T needs to be promoted to 32 bit integer type,
  /// false otherwise.
  bool needsPromotionToI32(const Type *T) const;

  /// \brief Promotes uniform binary operation \p I to equivalent 32 bit binary
  /// operation.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by sign or zero extending operands
  /// to 32 bits, replacing \p I with equivalent 32 bit binary operation, and
  /// truncating the result of 32 bit binary operation back to \p I's original
  /// type. Division operation is not promoted.
  ///
  /// \returns True if \p I is promoted to equivalent 32 bit binary operation,
  /// false otherwise.
  bool promoteUniformOpToI32(BinaryOperator &I) const;

  /// \brief Promotes uniform 'icmp' operation \p I to 32 bit 'icmp' operation.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by sign or zero extending operands
  /// to 32 bits, and replacing \p I with 32 bit 'icmp' operation.
  ///
  /// \returns True.
  bool promoteUniformOpToI32(ICmpInst &I) const;

  /// \brief Promotes uniform 'select' operation \p I to 32 bit 'select'
  /// operation.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by sign or zero extending operands
  /// to 32 bits, replacing \p I with 32 bit 'select' operation, and truncating
  /// the result of 32 bit 'select' operation back to \p I's original type.
  ///
  /// \returns True.
  bool promoteUniformOpToI32(SelectInst &I) const;

  /// \brief Promotes uniform 'bitreverse' intrinsic \p I to 32 bit
  /// 'bitreverse' intrinsic.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by zero extending the operand to
  /// 32 bits, replacing \p I with 32 bit 'bitreverse' intrinsic, shifting the
  /// result of 32 bit 'bitreverse' intrinsic to the right with zero fill (the
  /// shift amount is 32 minus \p I's base element bit width), and truncating
  /// the result of the shift operation back to \p I's original type.
  ///
  /// \returns True.
  bool promoteUniformBitreverseToI32(IntrinsicInst &I) const;

public:
  static char ID;

  AMDGPUCodeGenPrepare(const TargetMachine *TM = nullptr) :
    FunctionPass(ID), TM(static_cast<const GCNTargetMachine *>(TM)) {}

  bool visitFDiv(BinaryOperator &I);

  bool visitInstruction(Instruction &I) { return false; }
  bool visitBinaryOperator(BinaryOperator &I);
  bool visitICmpInst(ICmpInst &I);
  bool visitSelectInst(SelectInst &I);

  bool visitIntrinsicInst(IntrinsicInst &I);
  bool visitBitreverseIntrinsicInst(IntrinsicInst &I);

  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "AMDGPU IR optimizations"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DivergenceAnalysis>();
    AU.setPreservesAll();
  }
};

} // end anonymous namespace

unsigned AMDGPUCodeGenPrepare::getBaseElementBitWidth(const Type *T) const {
  assert(needsPromotionToI32(T) && "T does not need promotion to i32");

  if (T->isIntegerTy())
    return T->getIntegerBitWidth();
  return cast<VectorType>(T)->getElementType()->getIntegerBitWidth();
}

Type *AMDGPUCodeGenPrepare::getI32Ty(IRBuilder<> &B, const Type *T) const {
  assert(needsPromotionToI32(T) && "T does not need promotion to i32");

  if (T->isIntegerTy())
    return B.getInt32Ty();
  return VectorType::get(B.getInt32Ty(),
                         cast<VectorType>(T)->getNumElements());
}

bool AMDGPUCodeGenPrepare::isSigned(const BinaryOperator &I) const {
  return I.getOpcode() == Instruction::AShr ||
      I.getOpcode() == Instruction::SDiv || I.getOpcode() == Instruction::SRem;
}

bool AMDGPUCodeGenPrepare::isSigned(const SelectInst &I) const {
  return isa<ICmpInst>(I.getOperand(0)) ?
      cast<ICmpInst>(I.getOperand(0))->isSigned() : false;
}
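
// Some illustrative cases for needsPromotionToI32 (worked examples, not an
// exhaustive list): i8, i16 and <2 x i16> all need promotion to i32, while
// i1, i32 and i64 do not. On subtargets with packed (VOP3P) instructions,
// vector types are currently left unpromoted (see the TODO below).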
bool AMDGPUCodeGenPrepare::needsPromotionToI32(const Type *T) const {
  const IntegerType *IntTy = dyn_cast<IntegerType>(T);
  if (IntTy && IntTy->getBitWidth() > 1 && IntTy->getBitWidth() <= 16)
    return true;

  if (const VectorType *VT = dyn_cast<VectorType>(T)) {
    // TODO: The set of packed operations is more limited, so may want to
    // promote some anyway.
    if (ST->hasVOP3PInsts())
      return false;

    return needsPromotionToI32(VT->getElementType());
  }

  return false;
}

// Return true if the op promoted to i32 should have nsw set.
static bool promotedOpIsNSW(const Instruction &I) {
  switch (I.getOpcode()) {
  case Instruction::Shl:
  case Instruction::Add:
  case Instruction::Sub:
    return true;
  case Instruction::Mul:
    return I.hasNoUnsignedWrap();
  default:
    return false;
  }
}

// Return true if the op promoted to i32 should have nuw set.
static bool promotedOpIsNUW(const Instruction &I) {
  switch (I.getOpcode()) {
  case Instruction::Shl:
  case Instruction::Add:
  case Instruction::Mul:
    return true;
  case Instruction::Sub:
    return I.hasNoUnsignedWrap();
  default:
    return false;
  }
}
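
// For example, promoteUniformOpToI32 rewrites a uniform i16 'add'
// (illustrative IR; 'add' is not a signed op here, so the operands are zero
// extended, and the promoted op receives both nsw and nuw per the helpers
// above):
//
//   %r = add i16 %a, %b
//
// into:
//
//   %ext0 = zext i16 %a to i32
//   %ext1 = zext i16 %b to i32
//   %add = add nuw nsw i32 %ext0, %ext1
//   %r = trunc i32 %add to i16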
bool AMDGPUCodeGenPrepare::promoteUniformOpToI32(BinaryOperator &I) const {
  assert(needsPromotionToI32(I.getType()) &&
         "I does not need promotion to i32");

  if (I.getOpcode() == Instruction::SDiv ||
      I.getOpcode() == Instruction::UDiv)
    return false;

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getType());
  Value *ExtOp0 = nullptr;
  Value *ExtOp1 = nullptr;
  Value *ExtRes = nullptr;
  Value *TruncRes = nullptr;

  if (isSigned(I)) {
    ExtOp0 = Builder.CreateSExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
  } else {
    ExtOp0 = Builder.CreateZExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
  }

  ExtRes = Builder.CreateBinOp(I.getOpcode(), ExtOp0, ExtOp1);
  if (Instruction *Inst = dyn_cast<Instruction>(ExtRes)) {
    if (promotedOpIsNSW(cast<Instruction>(I)))
      Inst->setHasNoSignedWrap();

    if (promotedOpIsNUW(cast<Instruction>(I)))
      Inst->setHasNoUnsignedWrap();

    if (const auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I))
      Inst->setIsExact(ExactOp->isExact());
  }

  TruncRes = Builder.CreateTrunc(ExtRes, I.getType());

  I.replaceAllUsesWith(TruncRes);
  I.eraseFromParent();

  return true;
}
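
// For example, a uniform signed i16 compare (illustrative IR; the signed
// predicate selects sign extension, and no truncation is needed since the
// result is i1):
//
//   %c = icmp slt i16 %a, %b
//
// becomes:
//
//   %ext0 = sext i16 %a to i32
//   %ext1 = sext i16 %b to i32
//   %c = icmp slt i32 %ext0, %ext1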
bool AMDGPUCodeGenPrepare::promoteUniformOpToI32(ICmpInst &I) const {
  assert(needsPromotionToI32(I.getOperand(0)->getType()) &&
         "I does not need promotion to i32");

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getOperand(0)->getType());
  Value *ExtOp0 = nullptr;
  Value *ExtOp1 = nullptr;
  Value *NewICmp = nullptr;

  if (I.isSigned()) {
    ExtOp0 = Builder.CreateSExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
  } else {
    ExtOp0 = Builder.CreateZExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
  }
  NewICmp = Builder.CreateICmp(I.getPredicate(), ExtOp0, ExtOp1);

  I.replaceAllUsesWith(NewICmp);
  I.eraseFromParent();

  return true;
}
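
// For example, a uniform i16 'select' whose condition does not come from a
// signed 'icmp' (illustrative IR; the unsigned case, so zero extension):
//
//   %r = select i1 %cc, i16 %a, i16 %b
//
// becomes:
//
//   %ext1 = zext i16 %a to i32
//   %ext2 = zext i16 %b to i32
//   %sel = select i1 %cc, i32 %ext1, i32 %ext2
//   %r = trunc i32 %sel to i16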
bool AMDGPUCodeGenPrepare::promoteUniformOpToI32(SelectInst &I) const {
  assert(needsPromotionToI32(I.getType()) &&
         "I does not need promotion to i32");

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getType());
  Value *ExtOp1 = nullptr;
  Value *ExtOp2 = nullptr;
  Value *ExtRes = nullptr;
  Value *TruncRes = nullptr;

  if (isSigned(I)) {
    ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
    ExtOp2 = Builder.CreateSExt(I.getOperand(2), I32Ty);
  } else {
    ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
    ExtOp2 = Builder.CreateZExt(I.getOperand(2), I32Ty);
  }
  ExtRes = Builder.CreateSelect(I.getOperand(0), ExtOp1, ExtOp2);
  TruncRes = Builder.CreateTrunc(ExtRes, I.getType());

  I.replaceAllUsesWith(TruncRes);
  I.eraseFromParent();

  return true;
}
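
// For example, a uniform i16 'bitreverse' (illustrative IR; the reversed bits
// end up in the high half of the i32, so they are shifted down by
// 32 - 16 = 16 before truncating):
//
//   %r = call i16 @llvm.bitreverse.i16(i16 %a)
//
// becomes:
//
//   %ext = zext i16 %a to i32
//   %rev = call i32 @llvm.bitreverse.i32(i32 %ext)
//   %shr = lshr i32 %rev, 16
//   %r = trunc i32 %shr to i16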
bool AMDGPUCodeGenPrepare::promoteUniformBitreverseToI32(
    IntrinsicInst &I) const {
  assert(I.getIntrinsicID() == Intrinsic::bitreverse &&
         "I must be bitreverse intrinsic");
  assert(needsPromotionToI32(I.getType()) &&
         "I does not need promotion to i32");

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getType());
  Function *I32 =
      Intrinsic::getDeclaration(Mod, Intrinsic::bitreverse, { I32Ty });
  Value *ExtOp = Builder.CreateZExt(I.getOperand(0), I32Ty);
  Value *ExtRes = Builder.CreateCall(I32, { ExtOp });
  Value *LShrOp =
      Builder.CreateLShr(ExtRes, 32 - getBaseElementBitWidth(I.getType()));
  Value *TruncRes =
      Builder.CreateTrunc(LShrOp, I.getType());

  I.replaceAllUsesWith(TruncRes);
  I.eraseFromParent();

  return true;
}

static bool shouldKeepFDivF32(Value *Num, bool UnsafeDiv) {
  const ConstantFP *CNum = dyn_cast<ConstantFP>(Num);
  if (!CNum)
    return false;

  // Reciprocal f32 is handled separately without denormals.
  return UnsafeDiv || CNum->isExactlyValue(+1.0);
}

// Insert an intrinsic for fast fdiv for safe math situations where we can
// reduce precision. Leave fdiv for situations where the generic node is
// expected to be optimized.
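//
// For example, with 2.5 ulp of error allowed via !fpmath metadata on a
// subtarget without fp32 denormals (illustrative IR):
//
//   %d = fdiv float %x, %y, !fpmath !0    ; !0 = !{float 2.500000e+00}
//
// becomes:
//
//   %d = call float @llvm.amdgcn.fdiv.fast(float %x, float %y)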
bool AMDGPUCodeGenPrepare::visitFDiv(BinaryOperator &FDiv) {
  Type *Ty = FDiv.getType();

  if (!Ty->getScalarType()->isFloatTy())
    return false;

  MDNode *FPMath = FDiv.getMetadata(LLVMContext::MD_fpmath);
  if (!FPMath)
    return false;

  const FPMathOperator *FPOp = cast<const FPMathOperator>(&FDiv);
  float ULP = FPOp->getFPAccuracy();
  if (ULP < 2.5f)
    return false;

  FastMathFlags FMF = FPOp->getFastMathFlags();
  bool UnsafeDiv = HasUnsafeFPMath || FMF.unsafeAlgebra() ||
                   FMF.allowReciprocal();
  if (ST->hasFP32Denormals() && !UnsafeDiv)
    return false;

  IRBuilder<> Builder(FDiv.getParent(), std::next(FDiv.getIterator()), FPMath);
  Builder.setFastMathFlags(FMF);
  Builder.SetCurrentDebugLocation(FDiv.getDebugLoc());

  Function *Decl = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_fdiv_fast);

  Value *Num = FDiv.getOperand(0);
  Value *Den = FDiv.getOperand(1);

  Value *NewFDiv = nullptr;

  if (VectorType *VT = dyn_cast<VectorType>(Ty)) {
    NewFDiv = UndefValue::get(VT);

    // FIXME: Doesn't do the right thing for cases where the vector is
    // partially constant. This works when the scalarizer pass is run first.
    for (unsigned I = 0, E = VT->getNumElements(); I != E; ++I) {
      Value *NumEltI = Builder.CreateExtractElement(Num, I);
      Value *DenEltI = Builder.CreateExtractElement(Den, I);
      Value *NewElt;

      if (shouldKeepFDivF32(NumEltI, UnsafeDiv)) {
        NewElt = Builder.CreateFDiv(NumEltI, DenEltI);
      } else {
        NewElt = Builder.CreateCall(Decl, { NumEltI, DenEltI });
      }

      NewFDiv = Builder.CreateInsertElement(NewFDiv, NewElt, I);
    }
  } else {
    if (!shouldKeepFDivF32(Num, UnsafeDiv))
      NewFDiv = Builder.CreateCall(Decl, { Num, Den });
  }

  if (NewFDiv) {
    FDiv.replaceAllUsesWith(NewFDiv);
    NewFDiv->takeName(&FDiv);
    FDiv.eraseFromParent();
  }

  return !!NewFDiv;
}

static bool hasUnsafeFPMath(const Function &F) {
  Attribute Attr = F.getFnAttribute("unsafe-fp-math");
  return Attr.getValueAsString() == "true";
}

bool AMDGPUCodeGenPrepare::visitBinaryOperator(BinaryOperator &I) {
  bool Changed = false;

  if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
      DA->isUniform(&I))
    Changed |= promoteUniformOpToI32(I);

  return Changed;
}

bool AMDGPUCodeGenPrepare::visitICmpInst(ICmpInst &I) {
  bool Changed = false;

  if (ST->has16BitInsts() && needsPromotionToI32(I.getOperand(0)->getType()) &&
      DA->isUniform(&I))
    Changed |= promoteUniformOpToI32(I);

  return Changed;
}

bool AMDGPUCodeGenPrepare::visitSelectInst(SelectInst &I) {
  bool Changed = false;

  if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
      DA->isUniform(&I))
    Changed |= promoteUniformOpToI32(I);

  return Changed;
}

bool AMDGPUCodeGenPrepare::visitIntrinsicInst(IntrinsicInst &I) {
  switch (I.getIntrinsicID()) {
  case Intrinsic::bitreverse:
    return visitBitreverseIntrinsicInst(I);
  default:
    return false;
  }
}

bool AMDGPUCodeGenPrepare::visitBitreverseIntrinsicInst(IntrinsicInst &I) {
  bool Changed = false;

  if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
      DA->isUniform(&I))
    Changed |= promoteUniformBitreverseToI32(I);

  return Changed;
}

bool AMDGPUCodeGenPrepare::doInitialization(Module &M) {
  Mod = &M;
  return false;
}

bool AMDGPUCodeGenPrepare::runOnFunction(Function &F) {
  if (!TM || skipFunction(F))
    return false;

  ST = &TM->getSubtarget<SISubtarget>(F);
  DA = &getAnalysis<DivergenceAnalysis>();
  HasUnsafeFPMath = hasUnsafeFPMath(F);

  bool MadeChange = false;
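
  // Iterate with a precomputed Next iterator, since the visitors may erase
  // the current instruction, which would invalidate a plain iterator.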
  for (BasicBlock &BB : F) {
    BasicBlock::iterator Next;
    for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; I = Next) {
      Next = std::next(I);
      MadeChange |= visit(*I);
    }
  }

  return MadeChange;
}

INITIALIZE_TM_PASS_BEGIN(AMDGPUCodeGenPrepare, DEBUG_TYPE,
                         "AMDGPU IR optimizations", false, false)
INITIALIZE_PASS_DEPENDENCY(DivergenceAnalysis)
INITIALIZE_TM_PASS_END(AMDGPUCodeGenPrepare, DEBUG_TYPE,
                       "AMDGPU IR optimizations", false, false)

char AMDGPUCodeGenPrepare::ID = 0;

FunctionPass *llvm::createAMDGPUCodeGenPreparePass(const GCNTargetMachine *TM) {
  return new AMDGPUCodeGenPrepare(TM);
}
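
// An illustrative way to exercise just this pass (modeled on the pass's lit
// tests; exact flags may vary by LLVM version):
//
//   opt -S -mtriple=amdgcn-- -mcpu=tonga -amdgpu-codegenprepare input.ll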