//===----- ARMCodeGenPrepare.cpp ------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass inserts intrinsics to handle small types that would otherwise be
/// promoted during legalization. Here we can manually promote types or insert
/// intrinsics which can handle narrow types that aren't supported by the
/// register classes.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include <set>

#define DEBUG_TYPE "arm-codegenprepare"

using namespace llvm;

static cl::opt<bool>
DisableCGP("arm-disable-cgp", cl::Hidden, cl::init(true),
           cl::desc("Disable ARM specific CodeGenPrepare pass"));

static cl::opt<bool>
EnableDSP("arm-enable-scalar-dsp", cl::Hidden, cl::init(false),
          cl::desc("Use DSP instructions for scalar operations"));

static cl::opt<bool>
EnableDSPWithImms("arm-enable-scalar-dsp-imms", cl::Hidden, cl::init(false),
                  cl::desc("Use DSP instructions for scalar operations "
                           "with immediate operands"));

// The goal of this pass is to enable more efficient code generation for
// operations on narrow types (i.e. types narrower than 32 bits), and this is
// a motivating IR code example:
//
//   define hidden i32 @cmp(i8 zeroext) {
//     %2 = add i8 %0, -49
//     %3 = icmp ult i8 %2, 3
//     ..
//   }
//
// The issue here is that i8 is type-legalized to i32 because i8 is not a
// legal type. Thus, the arithmetic is performed in 32-bit precision, but then
// the byte value is masked out as follows:
//
//   t19: i32 = add t4, Constant:i32<-49>
//   t24: i32 = and t19, Constant:i32<255>
//
// Consequently, we generate code like this:
//
//   subs r0, #49
//   uxtb r1, r0
//   cmp  r1, #3
//
// This shows that masking out the byte value results in generation of
// the UXTB instruction. This is not optimal as r0 already contains the byte
// value we need, and so instead we can just generate:
//
//   sub.w r1, r0, #49
//   cmp   r1, #3
//
// We achieve this by type promoting the IR to i32 like so for this example:
//
//   define i32 @cmp(i8 zeroext %c) {
//     %0 = zext i8 %c to i32
//     %c.off = add i32 %0, -49
//     %1 = icmp ult i32 %c.off, 3
//     ..
//   }
//
// For this to be valid and legal, we need to prove that the i32 add is
// producing the same value as the i8 addition, and that, for example, no
// overflow happens.
//
// A brief sketch of the algorithm and some terminology. We pattern match
// interesting IR patterns which have:
// - "sources": instructions producing narrow values (i8, i16), and
// - "sinks": instructions consuming these narrow values.
//
// We collect all the instructions connecting sources and sinks in a worklist,
// so that we can mutate these instructions and perform type promotion when it
// is legal to do so.

namespace {
class IRPromoter {
  SmallPtrSet<Value*, 8> NewInsts;
  SmallPtrSet<Instruction*, 4> InstsToRemove;
  DenseMap<Value*, SmallVector<Type*, 4>> TruncTysMap;
  SmallPtrSet<Value*, 8> Promoted;
  Module *M = nullptr;
  LLVMContext &Ctx;
  IntegerType *ExtTy = nullptr;
  IntegerType *OrigTy = nullptr;
  SmallPtrSetImpl<Value*> *Visited;
  SmallPtrSetImpl<Value*> *Sources;
  SmallPtrSetImpl<Instruction*> *Sinks;
  SmallPtrSetImpl<Instruction*> *SafeToPromote;

  void ReplaceAllUsersOfWith(Value *From, Value *To);
  void PrepareConstants();
  void ExtendSources();
  void ConvertTruncs();
  void PromoteTree();
  void TruncateSinks();
  void Cleanup();

public:
  IRPromoter(Module *M) : M(M), Ctx(M->getContext()),
                          ExtTy(Type::getInt32Ty(Ctx)) { }

  void Mutate(Type *OrigTy,
              SmallPtrSetImpl<Value*> &Visited,
              SmallPtrSetImpl<Value*> &Sources,
              SmallPtrSetImpl<Instruction*> &Sinks,
              SmallPtrSetImpl<Instruction*> &SafeToPromote);
};

class ARMCodeGenPrepare : public FunctionPass {
  const ARMSubtarget *ST = nullptr;
  IRPromoter *Promoter = nullptr;
  std::set<Value*> AllVisited;
  SmallPtrSet<Instruction*, 8> SafeToPromote;

  bool isSafeOverflow(Instruction *I);
  bool isSupportedValue(Value *V);
  bool isLegalToPromote(Value *V);
  bool TryToPromote(Value *V);

public:
  static char ID;
  static unsigned TypeSize;
  Type *OrigTy = nullptr;

  ARMCodeGenPrepare() : FunctionPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetPassConfig>();
  }

  StringRef getPassName() const override { return "ARM IR optimizations"; }

  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;
  bool doFinalization(Module &M) override;
};

} // end anonymous namespace

static bool generateSignBits(Value *V) {
  if (!isa<Instruction>(V))
    return false;

  unsigned Opc = cast<Instruction>(V)->getOpcode();
  return Opc == Instruction::AShr || Opc == Instruction::SDiv ||
         Opc == Instruction::SRem;
}

static bool EqualTypeSize(Value *V) {
  return V->getType()->getScalarSizeInBits() == ARMCodeGenPrepare::TypeSize;
}

static bool LessOrEqualTypeSize(Value *V) {
  return V->getType()->getScalarSizeInBits() <= ARMCodeGenPrepare::TypeSize;
}

static bool GreaterThanTypeSize(Value *V) {
  return V->getType()->getScalarSizeInBits() > ARMCodeGenPrepare::TypeSize;
}

static bool LessThanTypeSize(Value *V) {
  return V->getType()->getScalarSizeInBits() < ARMCodeGenPrepare::TypeSize;
}

/// Some instructions can use 8- and 16-bit operands, and we don't need to
/// promote anything larger. We disallow booleans to make life easier when
/// dealing with icmps but allow any other integer that is <= 16 bits. Void
/// types are accepted so we can handle switches.
static bool isSupportedType(Value *V) {
  Type *Ty = V->getType();

  // Allow voids and pointers, these won't be promoted.
  if (Ty->isVoidTy() || Ty->isPointerTy())
    return true;

  if (auto *Ld = dyn_cast<LoadInst>(V))
    Ty = cast<PointerType>(Ld->getPointerOperandType())->getElementType();

  if (!isa<IntegerType>(Ty) ||
      cast<IntegerType>(Ty)->getBitWidth() == 1)
    return false;

  return LessOrEqualTypeSize(V);
}
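
// As an illustrative example of the terminology (assuming i8 operations, so
// TypeSize == 8), in a chain such as:
//
//   %narrow = load i8, i8* %p
//   %val = add i8 %narrow, 1
//   store i8 %val, i8* %q
//
// the load is a source, the store is a sink, and the add is the part of the
// tree that will be mutated to operate on i32.
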
/// Return true if the given value is a source in the use-def chain, producing
/// a narrow 'TypeSize' value. These values will be zext to start the promotion
/// of the tree to i32. We guarantee that these won't populate the upper bits
/// of the register. ZExt on the loads will be free, and the same for call
/// return values because we only accept ones that guarantee a zeroext return
/// value. Many arguments will have the zeroext attribute too, so those would
/// be free too.
static bool isSource(Value *V) {
  if (!isa<IntegerType>(V->getType()))
    return false;

  // TODO Allow zexts to be sources.
  if (isa<Argument>(V) || isa<LoadInst>(V) || isa<BitCastInst>(V))
    return true;
  if (auto *Call = dyn_cast<CallInst>(V))
    return Call->hasRetAttr(Attribute::AttrKind::ZExt);
  if (auto *Trunc = dyn_cast<TruncInst>(V))
    return EqualTypeSize(Trunc);
  return false;
}

/// Return true if V will require any promoted values to be truncated for the
/// IR to remain valid. We can't mutate the value type of these instructions.
static bool isSink(Value *V) {
  // TODO The truncate also isn't actually necessary because we would have
  // already proved that the data value is kept within the range of the
  // original data type.

  // Sinks are:
  // - points where the value in the register is being observed, such as an
  //   icmp, switch or store.
  // - points where value types have to match, such as calls and returns.
  // - zexts are included to ease the transformation and are generally removed
  //   later on.
  if (auto *Store = dyn_cast<StoreInst>(V))
    return LessOrEqualTypeSize(Store->getValueOperand());
  if (auto *Return = dyn_cast<ReturnInst>(V))
    return LessOrEqualTypeSize(Return->getReturnValue());
  if (auto *ZExt = dyn_cast<ZExtInst>(V))
    return GreaterThanTypeSize(ZExt);
  if (auto *Switch = dyn_cast<SwitchInst>(V))
    return LessThanTypeSize(Switch->getCondition());
  if (auto *ICmp = dyn_cast<ICmpInst>(V))
    return ICmp->isSigned() || LessThanTypeSize(ICmp->getOperand(0));

  return isa<CallInst>(V);
}

/// Return whether the instruction can be promoted without any modifications
/// to its operands or result.
bool ARMCodeGenPrepare::isSafeOverflow(Instruction *I) {
  // FIXME Do we need NSW too?
  if (isa<OverflowingBinaryOperator>(I) && I->hasNoUnsignedWrap())
    return true;

  // We can support a potentially overflowing instruction (I) if:
  // - It is only used by an unsigned icmp.
  // - The icmp uses a constant.
  // - The overflowing value (I) is decreasing, i.e. it would underflow,
  //   wrapping around zero to become a larger number than before.
  // - The underflowing instruction (I) also uses a constant.
  //
  // We can then use the two constants to calculate whether the result would
  // wrap with respect to itself in the original bitwidth. If it doesn't wrap,
  // just underflows the range, the icmp would give the same result whether
  // the result has been truncated or not.
  // We calculate this by:
  // - Zero extending both constants, if needed, to 32 bits.
  // - Taking the absolute value of I's constant and adding it to the icmp
  //   constant.
  // - Checking that this value is not out of range for the small type. If it
  //   is, it means that it has underflowed enough to wrap around the icmp
  //   constant.
  //
  // For example:
  //
  // %sub = sub i8 %a, 2
  // %cmp = icmp ule i8 %sub, 254
  //
  // If %a = 0, %sub = -2 == FE == 254
  // But if this is evaluated as an i32:
  // %sub = -2 == FF FF FF FE == 4294967294
  // So the unsigned compares (i8 and i32) would not yield the same result.
  //
  // Another way to look at it is:
  // %a - 2 <= 254
  // %a + 2 <= 254 + 2
  // %a <= 256
  // And we can't represent 256 in the i8 format, so we don't support it.
  //
  // Whereas:
  //
  // %sub = sub i8 %a, 1
  // %cmp = icmp ule i8 %sub, 254
  //
  // If %a = 0, %sub = -1 == FF == 255
  // As i32:
  // %sub = -1 == FF FF FF FF == 4294967295
  //
  // In this case, the unsigned compare results would be the same and this
  // would also be true for ult, uge and ugt:
  // - (255 < 254) == (0xFFFFFFFF < 254) == false
  // - (255 <= 254) == (0xFFFFFFFF <= 254) == false
  // - (255 > 254) == (0xFFFFFFFF > 254) == true
  // - (255 >= 254) == (0xFFFFFFFF >= 254) == true
  //
  // To demonstrate why we can't handle increasing values:
  //
  // %add = add i8 %a, 2
  // %cmp = icmp ult i8 %add, 127
  //
  // If %a = 254, %add = 256 == (i8 1)
  // As i32:
  // %add = 256
  //
  // (1 < 127) != (256 < 127)

  unsigned Opc = I->getOpcode();
  if (Opc != Instruction::Add && Opc != Instruction::Sub)
    return false;

  if (!I->hasOneUse() ||
      !isa<ICmpInst>(*I->user_begin()) ||
      !isa<ConstantInt>(I->getOperand(1)))
    return false;

  ConstantInt *OverflowConst = cast<ConstantInt>(I->getOperand(1));
  bool NegImm = OverflowConst->isNegative();
  bool IsDecreasing = ((Opc == Instruction::Sub) && !NegImm) ||
                      ((Opc == Instruction::Add) && NegImm);
  if (!IsDecreasing)
    return false;

  // Don't support an icmp that deals with sign bits.
  auto *CI = cast<ICmpInst>(*I->user_begin());
  if (CI->isSigned() || CI->isEquality())
    return false;

  ConstantInt *ICmpConst = nullptr;
  if (auto *Const = dyn_cast<ConstantInt>(CI->getOperand(0)))
    ICmpConst = Const;
  else if (auto *Const = dyn_cast<ConstantInt>(CI->getOperand(1)))
    ICmpConst = Const;
  else
    return false;

  // Now check that the result can't wrap on itself.
  APInt Total = ICmpConst->getValue().getBitWidth() < 32 ?
    ICmpConst->getValue().zext(32) : ICmpConst->getValue();

  Total += OverflowConst->getValue().getBitWidth() < 32 ?
    OverflowConst->getValue().abs().zext(32) :
    OverflowConst->getValue().abs();
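
  // Tying this back to the first example above: for 'sub i8 %a, 2' compared
  // against 254, Total = 254 + |2| = 256, which is greater than the i8
  // maximum of 255, so the transform is rejected; whereas for 'sub i8 %a, 1',
  // Total = 255 <= 255 and the overflow is considered safe.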
  APInt Max = APInt::getAllOnesValue(ARMCodeGenPrepare::TypeSize);

  if (Total.getBitWidth() > Max.getBitWidth()) {
    if (Total.ugt(Max.zext(Total.getBitWidth())))
      return false;
  } else if (Max.getBitWidth() > Total.getBitWidth()) {
    if (Total.zext(Max.getBitWidth()).ugt(Max))
      return false;
  } else if (Total.ugt(Max))
    return false;

  LLVM_DEBUG(dbgs() << "ARM CGP: Allowing safe overflow for " << *I << "\n");
  return true;
}

static bool shouldPromote(Value *V) {
  if (!isa<IntegerType>(V->getType()) || isSink(V))
    return false;

  if (isSource(V))
    return true;

  auto *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  if (isa<ICmpInst>(I))
    return false;

  return true;
}

/// Return whether we can safely mutate V's type to ExtTy without having to be
/// concerned with zero extending or truncation.
static bool isPromotedResultSafe(Value *V) {
  if (!isa<Instruction>(V))
    return true;

  if (generateSignBits(V))
    return false;

  return !isa<OverflowingBinaryOperator>(V);
}

/// Return the intrinsic for the instruction that can perform the same
/// operation but on a narrow type. This is using the parallel DSP intrinsics
/// on scalar values.
static Intrinsic::ID getNarrowIntrinsic(Instruction *I) {
  // Whether we use the signed or unsigned versions of these intrinsics
  // doesn't matter because we're not using the GE bits that they set in
  // the APSR.
  switch(I->getOpcode()) {
  default:
    break;
  case Instruction::Add:
    return ARMCodeGenPrepare::TypeSize == 16 ? Intrinsic::arm_uadd16 :
      Intrinsic::arm_uadd8;
  case Instruction::Sub:
    return ARMCodeGenPrepare::TypeSize == 16 ? Intrinsic::arm_usub16 :
      Intrinsic::arm_usub8;
  }
  llvm_unreachable("unhandled opcode for narrow intrinsic");
}

void IRPromoter::ReplaceAllUsersOfWith(Value *From, Value *To) {
  SmallVector<Instruction*, 4> Users;
  Instruction *InstTo = dyn_cast<Instruction>(To);
  bool ReplacedAll = true;

  LLVM_DEBUG(dbgs() << "ARM CGP: Replacing " << *From << " with " << *To
             << "\n");

  for (Use &U : From->uses()) {
    auto *User = cast<Instruction>(U.getUser());
    if (InstTo && User->isIdenticalTo(InstTo)) {
      ReplacedAll = false;
      continue;
    }
    Users.push_back(User);
  }

  for (auto *U : Users)
    U->replaceUsesOfWith(From, To);

  if (ReplacedAll)
    if (auto *I = dyn_cast<Instruction>(From))
      InstsToRemove.insert(I);
}

void IRPromoter::PrepareConstants() {
  IRBuilder<> Builder{Ctx};
  // First step is to prepare the instructions for mutation. Most constants
  // just need to be zero extended into their new type, but complications
  // arise because:
  // - For nuw binary operators, negative immediates would need sign
  //   extending; however, instead we'll change them to positive and zext
  //   them. We can do this because:
  //   > The operators that can wrap are: add, sub, mul and shl.
  //   > shl interprets its second operand as unsigned and if the first
  //     operand is an immediate, it will need zext to be nuw.
  //   > I'm assuming mul has to interpret immediates as unsigned for nuw.
  //   > Which leaves the nuw add and sub to be handled; as with shl, if an
  //     immediate is used as operand 0, it will need zext to be nuw.
  // - We also allow add and sub to safely overflow in certain circumstances
  //   and only when the value (operand 0) is being decreased.
  //
  // For adds and subs that are either nuw or safely wrap and use a negative
  // immediate as operand 1, we create an equivalent instruction using a
  // positive immediate. That positive immediate can then be zext along with
  // all the other immediates later.
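  //
  // As a sketch of the rewrite this performs, an add of a negative immediate
  // becomes a sub of the corresponding positive immediate, and vice versa:
  //
  //   %off = add i8 %x, -49  -->  %off = sub i8 %x, 49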
  for (auto *V : *Visited) {
    if (!isa<Instruction>(V))
      continue;

    auto *I = cast<Instruction>(V);
    if (SafeToPromote->count(I)) {

      if (!isa<OverflowingBinaryOperator>(I))
        continue;

      if (auto *Const = dyn_cast<ConstantInt>(I->getOperand(1))) {
        if (!Const->isNegative())
          continue;

        unsigned Opc = I->getOpcode();
        if (Opc != Instruction::Add && Opc != Instruction::Sub)
          continue;

        LLVM_DEBUG(dbgs() << "ARM CGP: Adjusting " << *I << "\n");
        auto *NewConst = ConstantInt::get(Ctx, Const->getValue().abs());
        Builder.SetInsertPoint(I);
        Value *NewVal = Opc == Instruction::Sub ?
          Builder.CreateAdd(I->getOperand(0), NewConst) :
          Builder.CreateSub(I->getOperand(0), NewConst);
        LLVM_DEBUG(dbgs() << "ARM CGP: New equivalent: " << *NewVal << "\n");

        if (auto *NewInst = dyn_cast<Instruction>(NewVal)) {
          NewInst->copyIRFlags(I);
          NewInsts.insert(NewInst);
        }
        InstsToRemove.insert(I);
        I->replaceAllUsesWith(NewVal);
      }
    }
  }
  for (auto *I : NewInsts)
    Visited->insert(I);
}

void IRPromoter::ExtendSources() {
  IRBuilder<> Builder{Ctx};

  auto InsertZExt = [&](Value *V, Instruction *InsertPt) {
    assert(V->getType() != ExtTy && "zext already extends to i32");
    LLVM_DEBUG(dbgs() << "ARM CGP: Inserting ZExt for " << *V << "\n");
    Builder.SetInsertPoint(InsertPt);
    if (auto *I = dyn_cast<Instruction>(V))
      Builder.SetCurrentDebugLocation(I->getDebugLoc());

    Value *ZExt = Builder.CreateZExt(V, ExtTy);
    if (auto *I = dyn_cast<Instruction>(ZExt)) {
      if (isa<Argument>(V))
        I->moveBefore(InsertPt);
      else
        I->moveAfter(InsertPt);
      NewInsts.insert(I);
    }

    ReplaceAllUsersOfWith(V, ZExt);
  };

  // Now, insert extending instructions between the sources and their users.
  LLVM_DEBUG(dbgs() << "ARM CGP: Promoting sources:\n");
  for (auto *V : *Sources) {
    LLVM_DEBUG(dbgs() << " - " << *V << "\n");
    if (auto *I = dyn_cast<Instruction>(V))
      InsertZExt(I, I);
    else if (auto *Arg = dyn_cast<Argument>(V)) {
      BasicBlock &BB = Arg->getParent()->front();
      InsertZExt(Arg, &*BB.getFirstInsertionPt());
    } else {
      llvm_unreachable("unhandled source that needs extending");
    }
    Promoted.insert(V);
  }
}

void IRPromoter::PromoteTree() {
  LLVM_DEBUG(dbgs() << "ARM CGP: Mutating the tree...\n");

  IRBuilder<> Builder{Ctx};

  // Mutate the types of the instructions within the tree. Here we handle
  // constant operands.
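  // In the motivating example at the top of the file, this is the step that
  // zero extends the icmp's i8 constant 3 to i32 and mutates the type of the
  // arithmetic instruction in place.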
  for (auto *V : *Visited) {
    if (Sources->count(V))
      continue;

    auto *I = cast<Instruction>(V);
    if (Sinks->count(I))
      continue;

    for (unsigned i = 0, e = I->getNumOperands(); i < e; ++i) {
      Value *Op = I->getOperand(i);
      if ((Op->getType() == ExtTy) || !isa<IntegerType>(Op->getType()))
        continue;

      if (auto *Const = dyn_cast<ConstantInt>(Op)) {
        Constant *NewConst = ConstantExpr::getZExt(Const, ExtTy);
        I->setOperand(i, NewConst);
      } else if (isa<UndefValue>(Op))
        I->setOperand(i, UndefValue::get(ExtTy));
    }

    if (shouldPromote(I)) {
      I->mutateType(ExtTy);
      Promoted.insert(I);
    }
  }

  // Finally, any instructions that should be promoted but haven't yet been,
  // need to be handled using intrinsics.
  for (auto *V : *Visited) {
    auto *I = dyn_cast<Instruction>(V);
    if (!I)
      continue;

    if (Sources->count(I) || Sinks->count(I))
      continue;

    if (!shouldPromote(I) || SafeToPromote->count(I) || NewInsts.count(I))
      continue;

    assert(EnableDSP && "DSP intrinsic insertion not enabled!");

    // Replace unsafe instructions with appropriate intrinsic calls.
    LLVM_DEBUG(dbgs() << "ARM CGP: Inserting DSP intrinsic for "
               << *I << "\n");
    Function *DSPInst =
      Intrinsic::getDeclaration(M, getNarrowIntrinsic(I));
    Builder.SetInsertPoint(I);
    Builder.SetCurrentDebugLocation(I->getDebugLoc());
    Value *Args[] = { I->getOperand(0), I->getOperand(1) };
    CallInst *Call = Builder.CreateCall(DSPInst, Args);
    NewInsts.insert(Call);
    ReplaceAllUsersOfWith(I, Call);
  }
}

void IRPromoter::TruncateSinks() {
  LLVM_DEBUG(dbgs() << "ARM CGP: Fixing up the sinks:\n");

  IRBuilder<> Builder{Ctx};

  auto InsertTrunc = [&](Value *V, Type *TruncTy) -> Instruction* {
    if (!isa<Instruction>(V) || !isa<IntegerType>(V->getType()))
      return nullptr;

    if ((!Promoted.count(V) && !NewInsts.count(V)) || Sources->count(V))
      return nullptr;

    LLVM_DEBUG(dbgs() << "ARM CGP: Creating " << *TruncTy << " Trunc for "
               << *V << "\n");
    Builder.SetInsertPoint(cast<Instruction>(V));
    auto *Trunc = dyn_cast<Instruction>(Builder.CreateTrunc(V, TruncTy));
    if (Trunc)
      NewInsts.insert(Trunc);
    return Trunc;
  };

  // Fix up any stores or returns that use the results of the promoted
  // chain.
  for (auto *I : *Sinks) {
    LLVM_DEBUG(dbgs() << "ARM CGP: For Sink: " << *I << "\n");

    // Handle calls separately as we need to iterate over arg operands.
    if (auto *Call = dyn_cast<CallInst>(I)) {
      for (unsigned i = 0; i < Call->getNumArgOperands(); ++i) {
        Value *Arg = Call->getArgOperand(i);
        Type *Ty = TruncTysMap[Call][i];
        if (Instruction *Trunc = InsertTrunc(Arg, Ty)) {
          Trunc->moveBefore(Call);
          Call->setArgOperand(i, Trunc);
        }
      }
      continue;
    }

    // Special case switches because we need to truncate the condition.
    if (auto *Switch = dyn_cast<SwitchInst>(I)) {
      Type *Ty = TruncTysMap[Switch][0];
      if (Instruction *Trunc = InsertTrunc(Switch->getCondition(), Ty)) {
        Trunc->moveBefore(Switch);
        Switch->setCondition(Trunc);
      }
      continue;
    }

    // Now handle the others.
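    // For a store sink, for example, the trunc recreates the original narrow
    // type before the value is observed in memory:
    //
    //   store i8 %v, i8* %p  -->  %t = trunc i32 %v to i8
    //                             store i8 %t, i8* %p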
    for (unsigned i = 0; i < I->getNumOperands(); ++i) {
      Type *Ty = TruncTysMap[I][i];
      if (Instruction *Trunc = InsertTrunc(I->getOperand(i), Ty)) {
        Trunc->moveBefore(I);
        I->setOperand(i, Trunc);
      }
    }
  }
}

void IRPromoter::Cleanup() {
  // Some zexts will now have become redundant, along with their trunc
  // operands, so remove them.
  for (auto *V : *Visited) {
    if (!isa<CastInst>(V))
      continue;

    auto *ZExt = cast<CastInst>(V);
    if (ZExt->getDestTy() != ExtTy)
      continue;

    Value *Src = ZExt->getOperand(0);
    if (ZExt->getSrcTy() == ZExt->getDestTy()) {
      LLVM_DEBUG(dbgs() << "ARM CGP: Removing unnecessary cast: " << *ZExt
                 << "\n");
      ReplaceAllUsersOfWith(ZExt, Src);
      continue;
    }

    // For any truncs that we insert to handle zexts, we can replace the
    // result of the zext with the input to the trunc.
    if (NewInsts.count(Src) && isa<ZExtInst>(V) && isa<TruncInst>(Src)) {
      auto *Trunc = cast<TruncInst>(Src);
      assert(Trunc->getOperand(0)->getType() == ExtTy &&
             "expected inserted trunc to be operating on i32");
      ReplaceAllUsersOfWith(ZExt, Trunc->getOperand(0));
    }
  }

  for (auto *I : InstsToRemove) {
    LLVM_DEBUG(dbgs() << "ARM CGP: Removing " << *I << "\n");
    I->dropAllReferences();
    I->eraseFromParent();
  }

  InstsToRemove.clear();
  NewInsts.clear();
  TruncTysMap.clear();
  Promoted.clear();
}
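
// Narrow truncs in the middle of the tree can't simply have their type
// mutated, so they are replaced with an AND that keeps only the low bits of
// their operand. For example:
//
//   %narrow = trunc i16 %v to i8  -->  %narrow = and i16 %v, 255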
void IRPromoter::ConvertTruncs() {
  IRBuilder<> Builder{Ctx};

  for (auto *V : *Visited) {
    if (!isa<TruncInst>(V) || Sources->count(V))
      continue;

    auto *Trunc = cast<TruncInst>(V);
    assert(LessThanTypeSize(Trunc) && "expected narrow trunc");

    Builder.SetInsertPoint(Trunc);
    // Create the mask with the width of the trunc's operand, otherwise the
    // AND would be operating on mismatched types.
    auto *SrcTy = cast<IntegerType>(Trunc->getOperand(0)->getType());
    unsigned NumBits =
      cast<IntegerType>(Trunc->getType())->getScalarSizeInBits();
    ConstantInt *Mask =
      ConstantInt::get(SrcTy, APInt::getMaxValue(NumBits).getZExtValue());
    Value *Masked = Builder.CreateAnd(Trunc->getOperand(0), Mask);

    if (auto *I = dyn_cast<Instruction>(Masked))
      NewInsts.insert(I);

    ReplaceAllUsersOfWith(Trunc, Masked);
  }
}

void IRPromoter::Mutate(Type *OrigTy,
                        SmallPtrSetImpl<Value*> &Visited,
                        SmallPtrSetImpl<Value*> &Sources,
                        SmallPtrSetImpl<Instruction*> &Sinks,
                        SmallPtrSetImpl<Instruction*> &SafeToPromote) {
  LLVM_DEBUG(dbgs() << "ARM CGP: Promoting use-def chains from "
             << ARMCodeGenPrepare::TypeSize << " to 32 bits\n");

  assert(isa<IntegerType>(OrigTy) && "expected integer type");
  this->OrigTy = cast<IntegerType>(OrigTy);
  assert(OrigTy->getPrimitiveSizeInBits() < ExtTy->getPrimitiveSizeInBits() &&
         "original type not smaller than extended type");

  this->Visited = &Visited;
  this->Sources = &Sources;
  this->Sinks = &Sinks;
  this->SafeToPromote = &SafeToPromote;

  // Cache the original types of the values that will likely need truncating.
  for (auto *I : Sinks) {
    if (auto *Call = dyn_cast<CallInst>(I)) {
      for (unsigned i = 0; i < Call->getNumArgOperands(); ++i) {
        Value *Arg = Call->getArgOperand(i);
        TruncTysMap[Call].push_back(Arg->getType());
      }
    } else if (auto *Switch = dyn_cast<SwitchInst>(I))
      TruncTysMap[I].push_back(Switch->getCondition()->getType());
    else {
      for (unsigned i = 0; i < I->getNumOperands(); ++i)
        TruncTysMap[I].push_back(I->getOperand(i)->getType());
    }
  }

  // Convert adds and subs using negative immediates to equivalent
  // instructions that use positive constants.
  PrepareConstants();

  // Insert zext instructions between sources and their users.
  ExtendSources();

  // Convert any truncs, that aren't sources, into AND masks.
  ConvertTruncs();

  // Promote visited instructions, mutating their types in place. Also insert
  // DSP intrinsics, if enabled, for adds and subs which would be unsafe to
  // promote.
  PromoteTree();

  // Insert trunc instructions for use by calls, stores etc...
  TruncateSinks();

  // Finally, remove unnecessary zexts and truncs, delete old instructions
  // and clear the data structures.
  Cleanup();

  LLVM_DEBUG(dbgs() << "ARM CGP: Mutation complete\n");
}

/// We accept most instructions, as well as Arguments and ConstantInts. We
/// disallow casts other than zexts and truncs, and only allow calls if their
/// return value is zeroext. We don't allow opcodes that can introduce sign
/// bits.
bool ARMCodeGenPrepare::isSupportedValue(Value *V) {
  if (auto *I = dyn_cast<ICmpInst>(V)) {
    // Now that we allow smaller types than TypeSize, only allow icmps of
    // TypeSize because smaller ones would require a trunc to be legalised.
    // TODO: Allow icmp of smaller types, and calculate at the end
    // whether the transform would be beneficial.
    if (isa<PointerType>(I->getOperand(0)->getType()))
      return true;
    return EqualTypeSize(I->getOperand(0));
  }

  // Memory instructions
  if (isa<StoreInst>(V) || isa<GetElementPtrInst>(V))
    return true;

  // Branches and targets.
  if (isa<BranchInst>(V) || isa<SwitchInst>(V) || isa<BasicBlock>(V))
    return true;

  // Non-instruction values that we can handle.
  if ((isa<Constant>(V) && !isa<ConstantExpr>(V)) || isa<Argument>(V))
    return isSupportedType(V);

  if (isa<PHINode>(V) || isa<SelectInst>(V) || isa<ReturnInst>(V) ||
      isa<LoadInst>(V))
    return isSupportedType(V);

  if (isa<SExtInst>(V))
    return false;

  if (auto *Cast = dyn_cast<CastInst>(V))
    return isSupportedType(Cast) || isSupportedType(Cast->getOperand(0));

  // Special cases for calls as we need to check for zeroext.
  // TODO We should accept calls even if they don't have zeroext, as they
  // can still be sinks.
  if (auto *Call = dyn_cast<CallInst>(V))
    return isSupportedType(Call) &&
           Call->hasRetAttr(Attribute::AttrKind::ZExt);

  if (!isa<BinaryOperator>(V))
    return false;

  if (!isSupportedType(V))
    return false;

  if (generateSignBits(V)) {
    LLVM_DEBUG(dbgs() << "ARM CGP: No, instruction can generate sign bits.\n");
    return false;
  }
  return true;
}

/// Check that the type of V would be promoted and that the original type is
/// smaller than the targeted promoted type. Check that we're not trying to
/// promote something larger than our base 'TypeSize' type.
bool ARMCodeGenPrepare::isLegalToPromote(Value *V) {
  auto *I = dyn_cast<Instruction>(V);
  if (!I)
    return true;

  if (SafeToPromote.count(I))
    return true;

  if (isPromotedResultSafe(V) || isSafeOverflow(I)) {
    SafeToPromote.insert(I);
    return true;
  }

  if (I->getOpcode() != Instruction::Add && I->getOpcode() != Instruction::Sub)
    return false;

  // If promotion is not safe, can we use a DSP instruction to natively
  // handle the narrow type?
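  // For example (assuming a DSP-capable subtarget and EnableDSP), an i16 add
  // that could otherwise wrap can later be replaced in PromoteTree with:
  //
  //   %add = call i32 @llvm.arm.uadd16(i32 %a, i32 %b)
  //
  // UADD16 performs lane-wise 16-bit additions that wrap at 16 bits, so the
  // narrow result never pollutes the upper bits of the register.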
  if (!ST->hasDSP() || !EnableDSP || !isSupportedType(I))
    return false;

  if (ST->isThumb() && !ST->hasThumb2())
    return false;

  // TODO
  // Would it be profitable? For Thumb code, these parallel DSP instructions
  // are only Thumb-2, so we wouldn't be able to dual issue on Cortex-M33. For
  // Cortex-A, specifically Cortex-A72, the latency is double and throughput
  // is halved. They also do not take immediates as operands.
  for (auto &Op : I->operands()) {
    if (isa<Constant>(Op)) {
      if (!EnableDSPWithImms)
        return false;
    }
  }
  LLVM_DEBUG(dbgs() << "ARM CGP: Will use an intrinsic for: " << *I << "\n");
  return true;
}

bool ARMCodeGenPrepare::TryToPromote(Value *V) {
  OrigTy = V->getType();
  TypeSize = OrigTy->getPrimitiveSizeInBits();
  if (TypeSize > 16 || TypeSize < 8)
    return false;

  SafeToPromote.clear();

  if (!isSupportedValue(V) || !shouldPromote(V) || !isLegalToPromote(V))
    return false;

  LLVM_DEBUG(dbgs() << "ARM CGP: TryToPromote: " << *V << ", TypeSize = "
             << TypeSize << "\n");

  SetVector<Value*> WorkList;
  SmallPtrSet<Value*, 8> Sources;
  SmallPtrSet<Instruction*, 4> Sinks;
  SmallPtrSet<Value*, 16> CurrentVisited;
  WorkList.insert(V);

  // Return true if V was added to the worklist as a supported instruction,
  // if it was already visited, or if we don't need to explore it (e.g.
  // pointer values and GEPs), and false otherwise.
  auto AddLegalInst = [&](Value *V) {
    if (CurrentVisited.count(V))
      return true;

    // Ignore GEPs because they don't need promoting and the constant indices
    // will prevent the transformation.
    if (isa<GetElementPtrInst>(V))
      return true;

    if (!isSupportedValue(V) || (shouldPromote(V) && !isLegalToPromote(V))) {
      LLVM_DEBUG(dbgs() << "ARM CGP: Can't handle: " << *V << "\n");
      return false;
    }

    WorkList.insert(V);
    return true;
  };

  // Iterate through, and add to, a tree of operands and users in the use-def
  // chain.
  while (!WorkList.empty()) {
    Value *V = WorkList.back();
    WorkList.pop_back();
    if (CurrentVisited.count(V))
      continue;

    // Ignore non-instructions, other than arguments.
    if (!isa<Instruction>(V) && !isSource(V))
      continue;

    // If we've already visited this value from somewhere, bail now because
    // the tree has already been explored.
    // TODO: This could limit the transform, i.e. if we try to promote
    // something from an i8 and fail first, before trying an i16.
    if (AllVisited.count(V))
      return false;

    CurrentVisited.insert(V);
    AllVisited.insert(V);

    // Calls can be both sources and sinks.
    if (isSink(V))
      Sinks.insert(cast<Instruction>(V));

    if (isSource(V))
      Sources.insert(V);

    if (!isSink(V) && !isSource(V)) {
      if (auto *I = dyn_cast<Instruction>(V)) {
        // Visit operands of any instruction visited.
        for (auto &U : I->operands()) {
          if (!AddLegalInst(U))
            return false;
        }
      }
    }

    // Don't visit users of a node which isn't going to be mutated unless
    // it's a source.
    if (isSource(V) || shouldPromote(V)) {
      for (Use &U : V->uses()) {
        if (!AddLegalInst(U.getUser()))
          return false;
      }
    }
  }

  LLVM_DEBUG(dbgs() << "ARM CGP: Visited nodes:\n";
             for (auto *I : CurrentVisited)
               I->dump();
             );

  unsigned ToPromote = 0;
  for (auto *V : CurrentVisited) {
    if (Sources.count(V))
      continue;
    if (Sinks.count(cast<Instruction>(V)))
      continue;
    ++ToPromote;
  }

  if (ToPromote < 2)
    return false;

  Promoter->Mutate(OrigTy, CurrentVisited, Sources, Sinks, SafeToPromote);
  return true;
}

bool ARMCodeGenPrepare::doInitialization(Module &M) {
  Promoter = new IRPromoter(&M);
  return false;
}

bool ARMCodeGenPrepare::runOnFunction(Function &F) {
  if (skipFunction(F) || DisableCGP)
    return false;

  auto *TPC = &getAnalysis<TargetPassConfig>();
  if (!TPC)
    return false;

  const TargetMachine &TM = TPC->getTM<TargetMachine>();
  ST = &TM.getSubtarget<ARMSubtarget>(F);
  bool MadeChange = false;
  LLVM_DEBUG(dbgs() << "ARM CGP: Running on " << F.getName() << "\n");

  // Search up from icmps to try to promote their operands.
  for (BasicBlock &BB : F) {
    auto &Insts = BB.getInstList();
    for (auto &I : Insts) {
      if (AllVisited.count(&I))
        continue;

      if (isa<ICmpInst>(I)) {
        auto &CI = cast<ICmpInst>(I);

        // Skip signed or pointer compares.
        if (CI.isSigned() || !isa<IntegerType>(CI.getOperand(0)->getType()))
          continue;

        LLVM_DEBUG(dbgs() << "ARM CGP: Searching from: " << CI << "\n");

        for (auto &Op : CI.operands()) {
          if (auto *I = dyn_cast<Instruction>(Op))
            MadeChange |= TryToPromote(I);
        }
      }
    }
    LLVM_DEBUG(if (verifyFunction(F, &dbgs())) {
                 dbgs() << F;
                 report_fatal_error("Broken function after type promotion");
               });
  }
  if (MadeChange)
    LLVM_DEBUG(dbgs() << "After ARMCodeGenPrepare: " << F << "\n");

  return MadeChange;
}

bool ARMCodeGenPrepare::doFinalization(Module &M) {
  delete Promoter;
  return false;
}

INITIALIZE_PASS_BEGIN(ARMCodeGenPrepare, DEBUG_TYPE,
                      "ARM IR optimizations", false, false)
INITIALIZE_PASS_END(ARMCodeGenPrepare, DEBUG_TYPE, "ARM IR optimizations",
                    false, false)

char ARMCodeGenPrepare::ID = 0;
unsigned ARMCodeGenPrepare::TypeSize = 0;

FunctionPass *llvm::createARMCodeGenPreparePass() {
  return new ARMCodeGenPrepare();
}