//===-- AMDGPUCodeGenPrepare.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass does misc. AMDGPU optimizations on IR before instruction
/// selection.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include <cassert>
#include <iterator>

#define DEBUG_TYPE "amdgpu-codegenprepare"

using namespace llvm;

namespace {

static cl::opt<bool> WidenLoads(
  "amdgpu-codegenprepare-widen-constant-loads",
  cl::desc("Widen sub-dword constant address space loads in AMDGPUCodeGenPrepare"),
  cl::ReallyHidden,
  cl::init(true));

static cl::opt<bool> UseMul24Intrin(
  "amdgpu-codegenprepare-mul24",
  cl::desc("Introduce mul24 intrinsics in AMDGPUCodeGenPrepare"),
  cl::ReallyHidden,
  cl::init(true));

class AMDGPUCodeGenPrepare : public FunctionPass,
                             public InstVisitor<AMDGPUCodeGenPrepare, bool> {
  const GCNSubtarget *ST = nullptr;
  AssumptionCache *AC = nullptr;
  LegacyDivergenceAnalysis *DA = nullptr;
  Module *Mod = nullptr;
  const DataLayout *DL = nullptr;
  bool HasUnsafeFPMath = false;
  bool HasFP32Denormals = false;

  /// \returns \p T's base element bit width.
  unsigned getBaseElementBitWidth(const Type *T) const;

  /// \returns Equivalent 32 bit integer type for given type \p T. For example,
  /// if \p T is i7, then i32 is returned; if \p T is <3 x i12>, then <3 x i32>
  /// is returned.
  Type *getI32Ty(IRBuilder<> &B, const Type *T) const;

  /// \returns True if binary operation \p I is a signed binary operation,
  /// false otherwise.
  bool isSigned(const BinaryOperator &I) const;

  /// \returns True if the condition of 'select' operation \p I comes from a
  /// signed 'icmp' operation, false otherwise.
  bool isSigned(const SelectInst &I) const;

  /// \returns True if type \p T needs to be promoted to 32 bit integer type,
  /// false otherwise.
  bool needsPromotionToI32(const Type *T) const;

  /// Promotes uniform binary operation \p I to equivalent 32 bit binary
  /// operation.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by sign or zero extending operands
  /// to 32 bits, replacing \p I with the equivalent 32 bit binary operation,
  /// and truncating the result of the 32 bit binary operation back to \p I's
  /// original type. Division operations are not promoted.
  ///
  /// \returns True if \p I is promoted to equivalent 32 bit binary operation,
  /// false otherwise.
  bool promoteUniformOpToI32(BinaryOperator &I) const;
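
  // Illustrative example (value names invented): a uniform 16-bit add such as
  //   %r = add i16 %a, %b
  // is rewritten by this promotion into
  //   %a.ext = zext i16 %a to i32
  //   %b.ext = zext i16 %b to i32
  //   %r.ext = add nuw nsw i32 %a.ext, %b.ext
  //   %r     = trunc i32 %r.ext to i16
  // so the operation can stay on the 32-bit scalar unit rather than being
  // forced onto 16-bit vector instructions.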

  /// Promotes uniform 'icmp' operation \p I to 32 bit 'icmp' operation.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by sign or zero extending operands
  /// to 32 bits, and replacing \p I with a 32 bit 'icmp' operation.
  ///
  /// \returns True.
  bool promoteUniformOpToI32(ICmpInst &I) const;

  /// Promotes uniform 'select' operation \p I to 32 bit 'select'
  /// operation.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by sign or zero extending operands
  /// to 32 bits, replacing \p I with a 32 bit 'select' operation, and
  /// truncating the result of the 32 bit 'select' operation back to \p I's
  /// original type.
  ///
  /// \returns True.
  bool promoteUniformOpToI32(SelectInst &I) const;

  /// Promotes uniform 'bitreverse' intrinsic \p I to 32 bit 'bitreverse'
  /// intrinsic.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by zero extending the operand to
  /// 32 bits, replacing \p I with the 32 bit 'bitreverse' intrinsic, shifting
  /// the result of the 32 bit 'bitreverse' intrinsic to the right with zero
  /// fill (the shift amount is 32 minus \p I's base element bit width), and
  /// truncating the result of the shift operation back to \p I's original
  /// type.
  ///
  /// \returns True.
  bool promoteUniformBitreverseToI32(IntrinsicInst &I) const;
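
  // Illustrative example (value names invented): a uniform i16 bitreverse
  //   %r = call i16 @llvm.bitreverse.i16(i16 %a)
  // is promoted as described above into
  //   %a.ext = zext i16 %a to i32
  //   %r.ext = call i32 @llvm.bitreverse.i32(i32 %a.ext)
  //   %r.shr = lshr i32 %r.ext, 16        ; 32 minus the original bit width
  //   %r     = trunc i32 %r.shr to i16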

  unsigned numBitsUnsigned(Value *Op, unsigned ScalarSize) const;
  unsigned numBitsSigned(Value *Op, unsigned ScalarSize) const;
  bool isI24(Value *V, unsigned ScalarSize) const;
  bool isU24(Value *V, unsigned ScalarSize) const;

  /// Replace mul instructions with llvm.amdgcn.mul.u24 or llvm.amdgcn.mul.i24.
  /// SelectionDAG has an issue where an and asserting the bits are known
  bool replaceMulWithMul24(BinaryOperator &I) const;

  /// Expands 24 bit div or rem.
  Value* expandDivRem24(IRBuilder<> &Builder, BinaryOperator &I,
                        Value *Num, Value *Den,
                        bool IsDiv, bool IsSigned) const;

  /// Expands 32 bit div or rem.
  Value* expandDivRem32(IRBuilder<> &Builder, BinaryOperator &I,
                        Value *Num, Value *Den) const;

  /// Widen a scalar load.
  ///
  /// \details Widen a scalar load of a uniform, small (sub-dword) type from
  /// constant memory to a full 32 bits and then truncate the result, so that
  /// a scalar load can be selected instead of a vector load.
  ///
  /// \returns True if the load can be widened.
  bool canWidenScalarExtLoad(LoadInst &I) const;

public:
  static char ID;

  AMDGPUCodeGenPrepare() : FunctionPass(ID) {}

  bool visitFDiv(BinaryOperator &I);

  bool visitInstruction(Instruction &I) { return false; }
  bool visitBinaryOperator(BinaryOperator &I);
  bool visitLoadInst(LoadInst &I);
  bool visitICmpInst(ICmpInst &I);
  bool visitSelectInst(SelectInst &I);

  bool visitIntrinsicInst(IntrinsicInst &I);
  bool visitBitreverseIntrinsicInst(IntrinsicInst &I);

  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "AMDGPU IR optimizations"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<LegacyDivergenceAnalysis>();
    AU.setPreservesAll();
  }
};

} // end anonymous namespace

unsigned AMDGPUCodeGenPrepare::getBaseElementBitWidth(const Type *T) const {
  assert(needsPromotionToI32(T) && "T does not need promotion to i32");

  if (T->isIntegerTy())
    return T->getIntegerBitWidth();
  return cast<VectorType>(T)->getElementType()->getIntegerBitWidth();
}

Type *AMDGPUCodeGenPrepare::getI32Ty(IRBuilder<> &B, const Type *T) const {
  assert(needsPromotionToI32(T) && "T does not need promotion to i32");

  if (T->isIntegerTy())
    return B.getInt32Ty();
  return VectorType::get(B.getInt32Ty(), cast<VectorType>(T)->getNumElements());
}

bool AMDGPUCodeGenPrepare::isSigned(const BinaryOperator &I) const {
  return I.getOpcode() == Instruction::AShr ||
      I.getOpcode() == Instruction::SDiv || I.getOpcode() == Instruction::SRem;
}

bool AMDGPUCodeGenPrepare::isSigned(const SelectInst &I) const {
  return isa<ICmpInst>(I.getOperand(0)) ?
      cast<ICmpInst>(I.getOperand(0))->isSigned() : false;
}

bool AMDGPUCodeGenPrepare::needsPromotionToI32(const Type *T) const {
  const IntegerType *IntTy = dyn_cast<IntegerType>(T);
  if (IntTy && IntTy->getBitWidth() > 1 && IntTy->getBitWidth() <= 16)
    return true;

  if (const VectorType *VT = dyn_cast<VectorType>(T)) {
    // TODO: The set of packed operations is more limited, so may want to
    // promote some anyway.
    if (ST->hasVOP3PInsts())
      return false;

    return needsPromotionToI32(VT->getElementType());
  }

  return false;
}

// Return true if the op promoted to i32 should have nsw set.
static bool promotedOpIsNSW(const Instruction &I) {
  switch (I.getOpcode()) {
  case Instruction::Shl:
  case Instruction::Add:
  case Instruction::Sub:
    return true;
  case Instruction::Mul:
    return I.hasNoUnsignedWrap();
  default:
    return false;
  }
}

// Return true if the op promoted to i32 should have nuw set.
static bool promotedOpIsNUW(const Instruction &I) {
  switch (I.getOpcode()) {
  case Instruction::Shl:
  case Instruction::Add:
  case Instruction::Mul:
    return true;
  case Instruction::Sub:
    return I.hasNoUnsignedWrap();
  default:
    return false;
  }
}
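
// Why the flag transfer above is sound: non-signed ops are promoted with
// zero-extended operands of at most 16 bits, so e.g. a promoted add is at most
// 0xFFFF + 0xFFFF = 0x1FFFE, which wraps neither as a signed nor as an
// unsigned i32 value. A promoted sub can go negative (so no nuw unless the
// original had it), and a promoted mul can exceed INT32_MAX (so no nsw unless
// the original i16 product was known not to wrap, i.e. was nuw).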

bool AMDGPUCodeGenPrepare::canWidenScalarExtLoad(LoadInst &I) const {
  Type *Ty = I.getType();
  const DataLayout &DL = Mod->getDataLayout();
  int TySize = DL.getTypeSizeInBits(Ty);
  unsigned Align = I.getAlignment() ?
                   I.getAlignment() : DL.getABITypeAlignment(Ty);

  return I.isSimple() && TySize < 32 && Align >= 4 && DA->isUniform(&I);
}

bool AMDGPUCodeGenPrepare::promoteUniformOpToI32(BinaryOperator &I) const {
  assert(needsPromotionToI32(I.getType()) &&
         "I does not need promotion to i32");

  if (I.getOpcode() == Instruction::SDiv ||
      I.getOpcode() == Instruction::UDiv ||
      I.getOpcode() == Instruction::SRem ||
      I.getOpcode() == Instruction::URem)
    return false;

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getType());
  Value *ExtOp0 = nullptr;
  Value *ExtOp1 = nullptr;
  Value *ExtRes = nullptr;
  Value *TruncRes = nullptr;

  if (isSigned(I)) {
    ExtOp0 = Builder.CreateSExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
  } else {
    ExtOp0 = Builder.CreateZExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
  }

  ExtRes = Builder.CreateBinOp(I.getOpcode(), ExtOp0, ExtOp1);
  if (Instruction *Inst = dyn_cast<Instruction>(ExtRes)) {
    if (promotedOpIsNSW(cast<Instruction>(I)))
      Inst->setHasNoSignedWrap();

    if (promotedOpIsNUW(cast<Instruction>(I)))
      Inst->setHasNoUnsignedWrap();

    if (const auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I))
      Inst->setIsExact(ExactOp->isExact());
  }

  TruncRes = Builder.CreateTrunc(ExtRes, I.getType());

  I.replaceAllUsesWith(TruncRes);
  I.eraseFromParent();

  return true;
}

bool AMDGPUCodeGenPrepare::promoteUniformOpToI32(ICmpInst &I) const {
  assert(needsPromotionToI32(I.getOperand(0)->getType()) &&
         "I does not need promotion to i32");

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getOperand(0)->getType());
  Value *ExtOp0 = nullptr;
  Value *ExtOp1 = nullptr;
  Value *NewICmp = nullptr;

  if (I.isSigned()) {
    ExtOp0 = Builder.CreateSExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
  } else {
    ExtOp0 = Builder.CreateZExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
  }
  NewICmp = Builder.CreateICmp(I.getPredicate(), ExtOp0, ExtOp1);

  I.replaceAllUsesWith(NewICmp);
  I.eraseFromParent();

  return true;
}

bool AMDGPUCodeGenPrepare::promoteUniformOpToI32(SelectInst &I) const {
  assert(needsPromotionToI32(I.getType()) &&
         "I does not need promotion to i32");

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getType());
  Value *ExtOp1 = nullptr;
  Value *ExtOp2 = nullptr;
  Value *ExtRes = nullptr;
  Value *TruncRes = nullptr;

  if (isSigned(I)) {
    ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
    ExtOp2 = Builder.CreateSExt(I.getOperand(2), I32Ty);
  } else {
    ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
    ExtOp2 = Builder.CreateZExt(I.getOperand(2), I32Ty);
  }
  ExtRes = Builder.CreateSelect(I.getOperand(0), ExtOp1, ExtOp2);
  TruncRes = Builder.CreateTrunc(ExtRes, I.getType());

  I.replaceAllUsesWith(TruncRes);
  I.eraseFromParent();

  return true;
}

bool AMDGPUCodeGenPrepare::promoteUniformBitreverseToI32(
    IntrinsicInst &I) const {
  assert(I.getIntrinsicID() == Intrinsic::bitreverse &&
         "I must be bitreverse intrinsic");
  assert(needsPromotionToI32(I.getType()) &&
         "I does not need promotion to i32");

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getType());
  Function *I32 =
      Intrinsic::getDeclaration(Mod, Intrinsic::bitreverse, { I32Ty });
  Value *ExtOp = Builder.CreateZExt(I.getOperand(0), I32Ty);
  Value *ExtRes = Builder.CreateCall(I32, { ExtOp });
  Value *LShrOp =
      Builder.CreateLShr(ExtRes, 32 - getBaseElementBitWidth(I.getType()));
  Value *TruncRes =
      Builder.CreateTrunc(LShrOp, I.getType());

  I.replaceAllUsesWith(TruncRes);
  I.eraseFromParent();

  return true;
}

unsigned AMDGPUCodeGenPrepare::numBitsUnsigned(Value *Op,
                                               unsigned ScalarSize) const {
  KnownBits Known = computeKnownBits(Op, *DL, 0, AC);
  return ScalarSize - Known.countMinLeadingZeros();
}

unsigned AMDGPUCodeGenPrepare::numBitsSigned(Value *Op,
                                             unsigned ScalarSize) const {
  // In order for this to be a signed 24-bit value, bit 23 must be a sign bit.
  return ScalarSize - ComputeNumSignBits(Op, *DL, 0, AC);
}

bool AMDGPUCodeGenPrepare::isI24(Value *V, unsigned ScalarSize) const {
  return ScalarSize >= 24 && // Types less than 24-bit should be treated
                             // as unsigned 24-bit values.
    numBitsSigned(V, ScalarSize) < 24;
}

bool AMDGPUCodeGenPrepare::isU24(Value *V, unsigned ScalarSize) const {
  return numBitsUnsigned(V, ScalarSize) <= 24;
}

static void extractValues(IRBuilder<> &Builder,
                          SmallVectorImpl<Value *> &Values, Value *V) {
  VectorType *VT = dyn_cast<VectorType>(V->getType());
  if (!VT) {
    Values.push_back(V);
    return;
  }

  for (int I = 0, E = VT->getNumElements(); I != E; ++I)
    Values.push_back(Builder.CreateExtractElement(V, I));
}

static Value *insertValues(IRBuilder<> &Builder,
                           Type *Ty,
                           SmallVectorImpl<Value *> &Values) {
  if (Values.size() == 1)
    return Values[0];

  Value *NewVal = UndefValue::get(Ty);
  for (int I = 0, E = Values.size(); I != E; ++I)
    NewVal = Builder.CreateInsertElement(NewVal, Values[I], I);

  return NewVal;
}
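
// Illustrative example (value names invented): if both multiplicands of a
// divergent i32 multiply are known to fit in 24 bits, e.g.
//   %x = and i32 %a, 16777215
//   %y = and i32 %b, 16777215
//   %m = mul i32 %x, %y
// then replaceMulWithMul24 below rewrites the multiply as
//   %m = call i32 @llvm.amdgcn.mul.u24(i32 %x, i32 %y)
// which can be selected to the cheaper v_mul_u32_u24 instruction.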

bool AMDGPUCodeGenPrepare::replaceMulWithMul24(BinaryOperator &I) const {
  if (I.getOpcode() != Instruction::Mul)
    return false;

  Type *Ty = I.getType();
  unsigned Size = Ty->getScalarSizeInBits();
  if (Size <= 16 && ST->has16BitInsts())
    return false;

  // Prefer scalar if this could be s_mul_i32
  if (DA->isUniform(&I))
    return false;

  Value *LHS = I.getOperand(0);
  Value *RHS = I.getOperand(1);
  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Intrinsic::ID IntrID = Intrinsic::not_intrinsic;

  // TODO: Should this try to match mulhi24?
  if (ST->hasMulU24() && isU24(LHS, Size) && isU24(RHS, Size)) {
    IntrID = Intrinsic::amdgcn_mul_u24;
  } else if (ST->hasMulI24() && isI24(LHS, Size) && isI24(RHS, Size)) {
    IntrID = Intrinsic::amdgcn_mul_i24;
  } else
    return false;

  SmallVector<Value *, 4> LHSVals;
  SmallVector<Value *, 4> RHSVals;
  SmallVector<Value *, 4> ResultVals;
  extractValues(Builder, LHSVals, LHS);
  extractValues(Builder, RHSVals, RHS);

  IntegerType *I32Ty = Builder.getInt32Ty();
  FunctionCallee Intrin = Intrinsic::getDeclaration(Mod, IntrID);
  for (int I = 0, E = LHSVals.size(); I != E; ++I) {
    Value *LHS, *RHS;
    if (IntrID == Intrinsic::amdgcn_mul_u24) {
      LHS = Builder.CreateZExtOrTrunc(LHSVals[I], I32Ty);
      RHS = Builder.CreateZExtOrTrunc(RHSVals[I], I32Ty);
    } else {
      LHS = Builder.CreateSExtOrTrunc(LHSVals[I], I32Ty);
      RHS = Builder.CreateSExtOrTrunc(RHSVals[I], I32Ty);
    }

    Value *Result = Builder.CreateCall(Intrin, {LHS, RHS});

    if (IntrID == Intrinsic::amdgcn_mul_u24) {
      ResultVals.push_back(Builder.CreateZExtOrTrunc(Result,
                                                     LHSVals[I]->getType()));
    } else {
      ResultVals.push_back(Builder.CreateSExtOrTrunc(Result,
                                                     LHSVals[I]->getType()));
    }
  }

  Value *NewVal = insertValues(Builder, Ty, ResultVals);
  NewVal->takeName(&I);
  I.replaceAllUsesWith(NewVal);
  I.eraseFromParent();

  return true;
}

static bool shouldKeepFDivF32(Value *Num, bool UnsafeDiv, bool HasDenormals) {
  const ConstantFP *CNum = dyn_cast<ConstantFP>(Num);
  if (!CNum)
    return HasDenormals;

  if (UnsafeDiv)
    return true;

  bool IsOne = CNum->isExactlyValue(+1.0) || CNum->isExactlyValue(-1.0);

  // Reciprocal f32 is handled separately without denormals.
  return HasDenormals ^ IsOne;
}
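
// visitFDiv below only fires on f32 divides that carry !fpmath metadata
// allowing at least 2.5 ulp of error, e.g. (illustrative IR):
//   %q = fdiv float %x, %y, !fpmath !0
//   !0 = !{float 2.500000e+00}
// Qualifying divides are rewritten to llvm.amdgcn.fdiv.fast, a
// reduced-precision expansion, unless shouldKeepFDivF32() decides the plain
// fdiv should stay for the generic lowering to handle.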

// Insert an intrinsic for fast fdiv for safe math situations where we can
// reduce precision. Leave fdiv for situations where the generic node is
// expected to be optimized.
bool AMDGPUCodeGenPrepare::visitFDiv(BinaryOperator &FDiv) {
  Type *Ty = FDiv.getType();

  if (!Ty->getScalarType()->isFloatTy())
    return false;

  MDNode *FPMath = FDiv.getMetadata(LLVMContext::MD_fpmath);
  if (!FPMath)
    return false;

  const FPMathOperator *FPOp = cast<const FPMathOperator>(&FDiv);
  float ULP = FPOp->getFPAccuracy();
  if (ULP < 2.5f)
    return false;

  FastMathFlags FMF = FPOp->getFastMathFlags();
  bool UnsafeDiv = HasUnsafeFPMath || FMF.isFast() ||
                   FMF.allowReciprocal();

  // With UnsafeDiv, the node will be optimized to just rcp and mul.
  if (UnsafeDiv)
    return false;

  IRBuilder<> Builder(FDiv.getParent(), std::next(FDiv.getIterator()), FPMath);
  Builder.setFastMathFlags(FMF);
  Builder.SetCurrentDebugLocation(FDiv.getDebugLoc());

  Function *Decl = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_fdiv_fast);

  Value *Num = FDiv.getOperand(0);
  Value *Den = FDiv.getOperand(1);

  Value *NewFDiv = nullptr;

  if (VectorType *VT = dyn_cast<VectorType>(Ty)) {
    NewFDiv = UndefValue::get(VT);

    // FIXME: Doesn't do the right thing for cases where the vector is
    // partially constant. This works when the scalarizer pass is run first.
    for (unsigned I = 0, E = VT->getNumElements(); I != E; ++I) {
      Value *NumEltI = Builder.CreateExtractElement(Num, I);
      Value *DenEltI = Builder.CreateExtractElement(Den, I);
      Value *NewElt;

      if (shouldKeepFDivF32(NumEltI, UnsafeDiv, HasFP32Denormals)) {
        NewElt = Builder.CreateFDiv(NumEltI, DenEltI);
      } else {
        NewElt = Builder.CreateCall(Decl, { NumEltI, DenEltI });
      }

      NewFDiv = Builder.CreateInsertElement(NewFDiv, NewElt, I);
    }
  } else {
    if (!shouldKeepFDivF32(Num, UnsafeDiv, HasFP32Denormals))
      NewFDiv = Builder.CreateCall(Decl, { Num, Den });
  }

  if (NewFDiv) {
    FDiv.replaceAllUsesWith(NewFDiv);
    NewFDiv->takeName(&FDiv);
    FDiv.eraseFromParent();
  }

  return !!NewFDiv;
}

static bool hasUnsafeFPMath(const Function &F) {
  Attribute Attr = F.getFnAttribute("unsafe-fp-math");
  return Attr.getValueAsString() == "true";
}

static std::pair<Value*, Value*> getMul64(IRBuilder<> &Builder,
                                          Value *LHS, Value *RHS) {
  Type *I32Ty = Builder.getInt32Ty();
  Type *I64Ty = Builder.getInt64Ty();

  Value *LHS_EXT64 = Builder.CreateZExt(LHS, I64Ty);
  Value *RHS_EXT64 = Builder.CreateZExt(RHS, I64Ty);
  Value *MUL64 = Builder.CreateMul(LHS_EXT64, RHS_EXT64);
  Value *Lo = Builder.CreateTrunc(MUL64, I32Ty);
  Value *Hi = Builder.CreateLShr(MUL64, Builder.getInt64(32));
  Hi = Builder.CreateTrunc(Hi, I32Ty);
  return std::make_pair(Lo, Hi);
}

static Value* getMulHu(IRBuilder<> &Builder, Value *LHS, Value *RHS) {
  return getMul64(Builder, LHS, RHS).second;
}
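
// getMul64/getMulHu above emit a plain 64-bit multiply of zero-extended
// operands and split it into halves, roughly
//   lo = (uint32_t)(a64 * b64);  hi = (uint32_t)((a64 * b64) >> 32);
// The expectation (not enforced here) is that the backend folds the high half
// into a 32x32->high multiply such as v_mul_hi_u32.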

// The fractional part of a float is enough to accurately represent up to
// a 24-bit signed integer.
Value* AMDGPUCodeGenPrepare::expandDivRem24(IRBuilder<> &Builder,
                                            BinaryOperator &I,
                                            Value *Num, Value *Den,
                                            bool IsDiv, bool IsSigned) const {
  assert(Num->getType()->isIntegerTy(32));

  const DataLayout &DL = Mod->getDataLayout();
  unsigned LHSSignBits = ComputeNumSignBits(Num, DL, 0, AC, &I);
  if (LHSSignBits < 9)
    return nullptr;

  unsigned RHSSignBits = ComputeNumSignBits(Den, DL, 0, AC, &I);
  if (RHSSignBits < 9)
    return nullptr;

  unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
  unsigned DivBits = 32 - SignBits;
  if (IsSigned)
    ++DivBits;

  Type *I32Ty = Builder.getInt32Ty();
  Type *F32Ty = Builder.getFloatTy();
  ConstantInt *One = Builder.getInt32(1);
  Value *JQ = One;

  if (IsSigned) {
    // char|short jq = ia ^ ib;
    JQ = Builder.CreateXor(Num, Den);

    // jq = jq >> (bitsize - 2)
    JQ = Builder.CreateAShr(JQ, Builder.getInt32(30));

    // jq = jq | 0x1
    JQ = Builder.CreateOr(JQ, One);
  }

  // int ia = (int)LHS;
  Value *IA = Num;

  // int ib = (int)RHS;
  Value *IB = Den;

  // float fa = (float)ia;
  Value *FA = IsSigned ? Builder.CreateSIToFP(IA, F32Ty)
                       : Builder.CreateUIToFP(IA, F32Ty);

  // float fb = (float)ib;
  Value *FB = IsSigned ? Builder.CreateSIToFP(IB, F32Ty)
                       : Builder.CreateUIToFP(IB, F32Ty);

  Value *RCP = Builder.CreateFDiv(ConstantFP::get(F32Ty, 1.0), FB);
  Value *FQM = Builder.CreateFMul(FA, RCP);

  // fq = trunc(fqm);
  CallInst *FQ = Builder.CreateUnaryIntrinsic(Intrinsic::trunc, FQM);
  FQ->copyFastMathFlags(Builder.getFastMathFlags());

  // float fqneg = -fq;
  Value *FQNeg = Builder.CreateFNeg(FQ);

  // float fr = mad(fqneg, fb, fa);
  Value *FR = Builder.CreateIntrinsic(Intrinsic::amdgcn_fmad_ftz,
                                      {FQNeg->getType()}, {FQNeg, FB, FA}, FQ);

  // int iq = (int)fq;
  Value *IQ = IsSigned ? Builder.CreateFPToSI(FQ, I32Ty)
                       : Builder.CreateFPToUI(FQ, I32Ty);

  // fr = fabs(fr);
  FR = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, FR, FQ);

  // fb = fabs(fb);
  FB = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, FB, FQ);

  // int cv = fr >= fb;
  Value *CV = Builder.CreateFCmpOGE(FR, FB);

  // jq = (cv ? jq : 0);
  JQ = Builder.CreateSelect(CV, JQ, Builder.getInt32(0));

  // dst = iq + jq;
  Value *Div = Builder.CreateAdd(IQ, JQ);

  Value *Res = Div;
  if (!IsDiv) {
    // Rem needs compensation, it's easier to recompute it
    Value *Rem = Builder.CreateMul(Div, Den);
    Res = Builder.CreateSub(Num, Rem);
  }

  // Extend in register from the number of bits this divide really is.
  if (IsSigned) {
    Res = Builder.CreateShl(Res, 32 - DivBits);
    Res = Builder.CreateAShr(Res, 32 - DivBits);
  } else {
    ConstantInt *TruncMask = Builder.getInt32((UINT64_C(1) << DivBits) - 1);
    Res = Builder.CreateAnd(Res, TruncMask);
  }

  return Res;
}
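
// Rough intuition for the sign-bit checks above: an f32 significand holds 24
// bits exactly, so the float-reciprocal trick is only used when both operands
// have at least 9 sign bits, i.e. fit in a signed 24-bit value. For instance,
// i16 operands that were extended to i32 by expandDivRem32 always qualify.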

Value* AMDGPUCodeGenPrepare::expandDivRem32(IRBuilder<> &Builder,
                                            BinaryOperator &I,
                                            Value *Num, Value *Den) const {
  Instruction::BinaryOps Opc = I.getOpcode();
  assert(Opc == Instruction::URem || Opc == Instruction::UDiv ||
         Opc == Instruction::SRem || Opc == Instruction::SDiv);

  FastMathFlags FMF;
  FMF.setFast();
  Builder.setFastMathFlags(FMF);

  if (isa<Constant>(Den))
    return nullptr; // Keep it for optimization

  bool IsDiv = Opc == Instruction::UDiv || Opc == Instruction::SDiv;
  bool IsSigned = Opc == Instruction::SRem || Opc == Instruction::SDiv;

  Type *Ty = Num->getType();
  Type *I32Ty = Builder.getInt32Ty();
  Type *F32Ty = Builder.getFloatTy();

  if (Ty->getScalarSizeInBits() < 32) {
    if (IsSigned) {
      Num = Builder.CreateSExt(Num, I32Ty);
      Den = Builder.CreateSExt(Den, I32Ty);
    } else {
      Num = Builder.CreateZExt(Num, I32Ty);
      Den = Builder.CreateZExt(Den, I32Ty);
    }
  }

  if (Value *Res = expandDivRem24(Builder, I, Num, Den, IsDiv, IsSigned)) {
    Res = Builder.CreateTrunc(Res, Ty);
    return Res;
  }

  ConstantInt *Zero = Builder.getInt32(0);
  ConstantInt *One = Builder.getInt32(1);
  ConstantInt *MinusOne = Builder.getInt32(~0);

  Value *Sign = nullptr;
  if (IsSigned) {
    ConstantInt *K31 = Builder.getInt32(31);
    Value *LHSign = Builder.CreateAShr(Num, K31);
    Value *RHSign = Builder.CreateAShr(Den, K31);
    // Remainder sign is the same as LHS
    Sign = IsDiv ? Builder.CreateXor(LHSign, RHSign) : LHSign;

    Num = Builder.CreateAdd(Num, LHSign);
    Den = Builder.CreateAdd(Den, RHSign);

    Num = Builder.CreateXor(Num, LHSign);
    Den = Builder.CreateXor(Den, RHSign);
  }

  // RCP = URECIP(Den) = 2^32 / Den + e
  // e is rounding error.
  Value *DEN_F32 = Builder.CreateUIToFP(Den, F32Ty);
  Value *RCP_F32 = Builder.CreateFDiv(ConstantFP::get(F32Ty, 1.0), DEN_F32);
  // 0x4f800000 is 2^32 (UINT_MAX + 1) as an f32.
  Constant *UINT_MAX_PLUS_1 = ConstantFP::get(F32Ty, BitsToFloat(0x4f800000));
  Value *RCP_SCALE = Builder.CreateFMul(RCP_F32, UINT_MAX_PLUS_1);
  Value *RCP = Builder.CreateFPToUI(RCP_SCALE, I32Ty);

  // RCP_LO, RCP_HI = mul(RCP, Den)
  Value *RCP_LO, *RCP_HI;
  std::tie(RCP_LO, RCP_HI) = getMul64(Builder, RCP, Den);

  // NEG_RCP_LO = -RCP_LO
  Value *NEG_RCP_LO = Builder.CreateNeg(RCP_LO);

  // ABS_RCP_LO = (RCP_HI == 0 ? NEG_RCP_LO : RCP_LO)
  Value *RCP_HI_0_CC = Builder.CreateICmpEQ(RCP_HI, Zero);
  Value *ABS_RCP_LO = Builder.CreateSelect(RCP_HI_0_CC, NEG_RCP_LO, RCP_LO);

  // Calculate the rounding error from the URECIP instruction
  // E = mulhu(ABS_RCP_LO, RCP)
  Value *E = getMulHu(Builder, ABS_RCP_LO, RCP);

  // RCP_A_E = RCP + E
  Value *RCP_A_E = Builder.CreateAdd(RCP, E);

  // RCP_S_E = RCP - E
  Value *RCP_S_E = Builder.CreateSub(RCP, E);

  // Tmp0 = (RCP_HI == 0 ? RCP_A_E : RCP_S_E)
  Value *Tmp0 = Builder.CreateSelect(RCP_HI_0_CC, RCP_A_E, RCP_S_E);

  // Quotient = mulhu(Tmp0, Num)
  Value *Quotient = getMulHu(Builder, Tmp0, Num);

  // Num_S_Remainder = Quotient * Den
  Value *Num_S_Remainder = Builder.CreateMul(Quotient, Den);

  // Remainder = Num - Num_S_Remainder
  Value *Remainder = Builder.CreateSub(Num, Num_S_Remainder);

  // Remainder_GE_Den = (Remainder >= Den ? -1 : 0)
  Value *Rem_GE_Den_CC = Builder.CreateICmpUGE(Remainder, Den);
  Value *Remainder_GE_Den = Builder.CreateSelect(Rem_GE_Den_CC, MinusOne, Zero);

  // Remainder_GE_Zero = (Num >= Num_S_Remainder ? -1 : 0)
  Value *Num_GE_Num_S_Rem_CC = Builder.CreateICmpUGE(Num, Num_S_Remainder);
  Value *Remainder_GE_Zero = Builder.CreateSelect(Num_GE_Num_S_Rem_CC,
                                                  MinusOne, Zero);

  // Tmp1 = Remainder_GE_Den & Remainder_GE_Zero
  Value *Tmp1 = Builder.CreateAnd(Remainder_GE_Den, Remainder_GE_Zero);
  Value *Tmp1_0_CC = Builder.CreateICmpEQ(Tmp1, Zero);

  Value *Res;
  if (IsDiv) {
    // Quotient_A_One = Quotient + 1
    Value *Quotient_A_One = Builder.CreateAdd(Quotient, One);

    // Quotient_S_One = Quotient - 1
    Value *Quotient_S_One = Builder.CreateSub(Quotient, One);

    // Div = (Tmp1 == 0 ? Quotient : Quotient_A_One)
    Value *Div = Builder.CreateSelect(Tmp1_0_CC, Quotient, Quotient_A_One);

    // Div = (Remainder_GE_Zero == 0 ? Quotient_S_One : Div)
    Res = Builder.CreateSelect(Num_GE_Num_S_Rem_CC, Div, Quotient_S_One);
  } else {
    // Remainder_S_Den = Remainder - Den
    Value *Remainder_S_Den = Builder.CreateSub(Remainder, Den);

    // Remainder_A_Den = Remainder + Den
    Value *Remainder_A_Den = Builder.CreateAdd(Remainder, Den);

    // Rem = (Tmp1 == 0 ? Remainder : Remainder_S_Den)
    Value *Rem = Builder.CreateSelect(Tmp1_0_CC, Remainder, Remainder_S_Den);

    // Rem = (Remainder_GE_Zero == 0 ? Remainder_A_Den : Rem)
    Res = Builder.CreateSelect(Num_GE_Num_S_Rem_CC, Rem, Remainder_A_Den);
  }

  if (IsSigned) {
    Res = Builder.CreateXor(Res, Sign);
    Res = Builder.CreateSub(Res, Sign);
  }

  Res = Builder.CreateTrunc(Res, Ty);

  return Res;
}

bool AMDGPUCodeGenPrepare::visitBinaryOperator(BinaryOperator &I) {
  if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
      DA->isUniform(&I) && promoteUniformOpToI32(I))
    return true;

  if (UseMul24Intrin && replaceMulWithMul24(I))
    return true;

  bool Changed = false;
  Instruction::BinaryOps Opc = I.getOpcode();
  Type *Ty = I.getType();
  Value *NewDiv = nullptr;
  if ((Opc == Instruction::URem || Opc == Instruction::UDiv ||
       Opc == Instruction::SRem || Opc == Instruction::SDiv) &&
      Ty->getScalarSizeInBits() <= 32) {
    Value *Num = I.getOperand(0);
    Value *Den = I.getOperand(1);
    IRBuilder<> Builder(&I);
    Builder.SetCurrentDebugLocation(I.getDebugLoc());

    if (VectorType *VT = dyn_cast<VectorType>(Ty)) {
      NewDiv = UndefValue::get(VT);

      for (unsigned N = 0, E = VT->getNumElements(); N != E; ++N) {
        Value *NumEltN = Builder.CreateExtractElement(Num, N);
        Value *DenEltN = Builder.CreateExtractElement(Den, N);
        Value *NewElt = expandDivRem32(Builder, I, NumEltN, DenEltN);
        if (!NewElt)
          NewElt = Builder.CreateBinOp(Opc, NumEltN, DenEltN);
        NewDiv = Builder.CreateInsertElement(NewDiv, NewElt, N);
      }
    } else {
      NewDiv = expandDivRem32(Builder, I, Num, Den);
    }

    if (NewDiv) {
      I.replaceAllUsesWith(NewDiv);
      I.eraseFromParent();
      Changed = true;
    }
  }

  return Changed;
}
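
// Illustrative example (value names invented) of the widening performed by
// visitLoadInst below: a uniform, naturally aligned sub-dword load from
// constant memory such as
//   %v = load i8, i8 addrspace(4)* %p, align 4
// is rewritten to
//   %p32 = bitcast i8 addrspace(4)* %p to i32 addrspace(4)*
//   %w   = load i32, i32 addrspace(4)* %p32, align 4
//   %v   = trunc i32 %w to i8
// so that a scalar dword load can be selected instead of a narrow vector load.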

bool AMDGPUCodeGenPrepare::visitLoadInst(LoadInst &I) {
  if (!WidenLoads)
    return false;

  if ((I.getPointerAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
       I.getPointerAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
      canWidenScalarExtLoad(I)) {
    IRBuilder<> Builder(&I);
    Builder.SetCurrentDebugLocation(I.getDebugLoc());

    Type *I32Ty = Builder.getInt32Ty();
    Type *PT = PointerType::get(I32Ty, I.getPointerAddressSpace());
    Value *BitCast = Builder.CreateBitCast(I.getPointerOperand(), PT);
    LoadInst *WidenLoad = Builder.CreateLoad(I32Ty, BitCast);
    WidenLoad->copyMetadata(I);

    // If we have range metadata, we need to convert the type, and not make
    // assumptions about the high bits.
    if (auto *Range = WidenLoad->getMetadata(LLVMContext::MD_range)) {
      ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Range->getOperand(0));

      if (Lower->getValue().isNullValue()) {
        WidenLoad->setMetadata(LLVMContext::MD_range, nullptr);
      } else {
        Metadata *LowAndHigh[] = {
          ConstantAsMetadata::get(ConstantInt::get(I32Ty, Lower->getValue().zext(32))),
          // Don't make assumptions about the high bits.
          ConstantAsMetadata::get(ConstantInt::get(I32Ty, 0))
        };

        WidenLoad->setMetadata(LLVMContext::MD_range,
                               MDNode::get(Mod->getContext(), LowAndHigh));
      }
    }

    int TySize = Mod->getDataLayout().getTypeSizeInBits(I.getType());
    Type *IntNTy = Builder.getIntNTy(TySize);
    Value *ValTrunc = Builder.CreateTrunc(WidenLoad, IntNTy);
    Value *ValOrig = Builder.CreateBitCast(ValTrunc, I.getType());
    I.replaceAllUsesWith(ValOrig);
    I.eraseFromParent();
    return true;
  }

  return false;
}

bool AMDGPUCodeGenPrepare::visitICmpInst(ICmpInst &I) {
  bool Changed = false;

  if (ST->has16BitInsts() && needsPromotionToI32(I.getOperand(0)->getType()) &&
      DA->isUniform(&I))
    Changed |= promoteUniformOpToI32(I);

  return Changed;
}

bool AMDGPUCodeGenPrepare::visitSelectInst(SelectInst &I) {
  bool Changed = false;

  if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
      DA->isUniform(&I))
    Changed |= promoteUniformOpToI32(I);

  return Changed;
}

bool AMDGPUCodeGenPrepare::visitIntrinsicInst(IntrinsicInst &I) {
  switch (I.getIntrinsicID()) {
  case Intrinsic::bitreverse:
    return visitBitreverseIntrinsicInst(I);
  default:
    return false;
  }
}

bool AMDGPUCodeGenPrepare::visitBitreverseIntrinsicInst(IntrinsicInst &I) {
  bool Changed = false;

  if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
      DA->isUniform(&I))
    Changed |= promoteUniformBitreverseToI32(I);

  return Changed;
}

bool AMDGPUCodeGenPrepare::doInitialization(Module &M) {
  Mod = &M;
  DL = &Mod->getDataLayout();
  return false;
}

bool AMDGPUCodeGenPrepare::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
  if (!TPC)
    return false;

  const AMDGPUTargetMachine &TM = TPC->getTM<AMDGPUTargetMachine>();
  ST = &TM.getSubtarget<GCNSubtarget>(F);
  AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  DA = &getAnalysis<LegacyDivergenceAnalysis>();
  HasUnsafeFPMath = hasUnsafeFPMath(F);
  HasFP32Denormals = ST->hasFP32Denormals(F);

  bool MadeChange = false;

  for (BasicBlock &BB : F) {
    BasicBlock::iterator Next;
    for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; I = Next) {
      Next = std::next(I);
      MadeChange |= visit(*I);
    }
  }

  return MadeChange;
}

INITIALIZE_PASS_BEGIN(AMDGPUCodeGenPrepare, DEBUG_TYPE,
                      "AMDGPU IR optimizations", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(LegacyDivergenceAnalysis)
INITIALIZE_PASS_END(AMDGPUCodeGenPrepare, DEBUG_TYPE, "AMDGPU IR optimizations",
                    false, false)

char AMDGPUCodeGenPrepare::ID = 0;

FunctionPass *llvm::createAMDGPUCodeGenPreparePass() {
  return new AMDGPUCodeGenPrepare();
}
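
// The pass is created via createAMDGPUCodeGenPreparePass() when the AMDGPU
// target builds its IR pass pipeline; since it is registered above, it can
// also be run in isolation for testing, along the lines of:
//   opt -S -mtriple=amdgcn-- -amdgpu-codegenprepare input.ll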