//===----- X86CallFrameOptimization.cpp - Optimize x86 call sequences -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a pass that optimizes call sequences on x86.
// Currently, it converts movs of function parameters onto the stack into
// pushes. This is beneficial for two main reasons:
// 1) The push instruction encoding is much smaller than a stack-ptr-based mov.
// 2) It is possible to push memory arguments directly. So, if the
//    transformation is performed pre-reg-alloc, it can help relieve
//    register pressure.
//
//===----------------------------------------------------------------------===//

#include <algorithm>

#include "X86.h"
#include "X86InstrInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"

using namespace llvm;

#define DEBUG_TYPE "x86-cf-opt"

static cl::opt<bool>
    NoX86CFOpt("no-x86-call-frame-opt",
               cl::desc("Avoid optimizing x86 call frames for size"),
               cl::init(false), cl::Hidden);

namespace {
class X86CallFrameOptimization : public MachineFunctionPass {
public:
  X86CallFrameOptimization() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

private:
  // Information we know about a particular call site
  struct CallContext {
    CallContext()
        : FrameSetup(nullptr), Call(nullptr), SPCopy(nullptr), ExpectedDist(0),
          MovVector(4, nullptr), NoStackParams(false), UsePush(false) {}

    // Iterator referring to the frame setup instruction
    MachineBasicBlock::iterator FrameSetup;

    // Actual call instruction
    MachineInstr *Call;

    // A copy of the stack pointer
    MachineInstr *SPCopy;

    // The total displacement of all passed parameters
    int64_t ExpectedDist;

    // The sequence of movs used to pass the parameters
    SmallVector<MachineInstr *, 4> MovVector;

    // True if this call site has no stack parameters
    bool NoStackParams;

    // True if this call site can use push instructions
    bool UsePush;
  };

  typedef SmallVector<CallContext, 8> ContextVector;

  bool isLegal(MachineFunction &MF);

  bool isProfitable(MachineFunction &MF, ContextVector &CallSeqVector);

  void collectCallInfo(MachineFunction &MF, MachineBasicBlock &MBB,
                       MachineBasicBlock::iterator I, CallContext &Context);

  void adjustCallSequence(MachineFunction &MF, const CallContext &Context);

  MachineInstr *canFoldIntoRegPush(MachineBasicBlock::iterator FrameSetup,
                                   unsigned Reg);

  enum InstClassification { Convert, Skip, Exit };

  InstClassification classifyInstruction(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator MI,
                                         const X86RegisterInfo &RegInfo,
                                         DenseSet<unsigned int> &UsedRegs);

  StringRef getPassName() const override { return "X86 Optimize Call Frame"; }

  const TargetInstrInfo *TII;
  const X86FrameLowering *TFL;
  const X86Subtarget *STI;
  MachineRegisterInfo *MRI;
  unsigned SlotSize;
  unsigned Log2SlotSize;
  static char ID;
};

char X86CallFrameOptimization::ID = 0;
} // end anonymous namespace

FunctionPass *llvm::createX86CallFrameOptimization() {
  return new X86CallFrameOptimization();
}

// This checks whether the transformation is legal.
// Also returns false in cases where it's potentially legal, but
// we don't even want to try.
bool X86CallFrameOptimization::isLegal(MachineFunction &MF) {
  if (NoX86CFOpt.getValue())
    return false;

  // Work around LLVM PR30879 (bad interaction between CFO and libunwind)
  if (STI->isTargetFreeBSD() && STI->is32Bit() &&
      STI->getTargetTriple().getOSMajorVersion() >= 12)
    return false;

  // We can't encode multiple DW_CFA_GNU_args_size or DW_CFA_def_cfa_offset
  // in the compact unwind encoding that Darwin uses. So, bail if there
  // is a danger of that being generated.
  if (STI->isTargetDarwin() &&
      (!MF.getLandingPads().empty() ||
       (MF.getFunction()->needsUnwindTableEntry() && !TFL->hasFP(MF))))
    return false;

  // It is not valid to change the stack pointer outside the prolog/epilog
  // on 64-bit Windows.
  if (STI->isTargetWin64())
    return false;

  // You would expect straight-line code between call-frame setup and
  // call-frame destroy. You would be wrong. There are circumstances (e.g.
  // CMOV_GR8 expansion of a select that feeds a function call!) where we can
  // end up with the setup and the destroy in different basic blocks.
  // This is bad, and breaks SP adjustment.
  // So, check that all of the frames in the function are closed inside
  // the same block, and, for good measure, that there are no nested frames.
  unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
  for (MachineBasicBlock &BB : MF) {
    bool InsideFrameSequence = false;
    for (MachineInstr &MI : BB) {
      if (MI.getOpcode() == FrameSetupOpcode) {
        if (InsideFrameSequence)
          return false;
        InsideFrameSequence = true;
      } else if (MI.getOpcode() == FrameDestroyOpcode) {
        if (!InsideFrameSequence)
          return false;
        InsideFrameSequence = false;
      }
    }

    if (InsideFrameSequence)
      return false;
  }

  return true;
}

// Check whether this transformation is profitable for a particular
// function - in terms of code size.
bool X86CallFrameOptimization::isProfitable(MachineFunction &MF,
                                            ContextVector &CallSeqVector) {
  // This transformation is always a win when we do not expect to have
  // a reserved call frame. Under other circumstances, it may be either
  // a win or a loss, and requires a heuristic.
  bool CannotReserveFrame = MF.getFrameInfo().hasVarSizedObjects();
  if (CannotReserveFrame)
    return true;

  unsigned StackAlign = TFL->getStackAlignment();

  int64_t Advantage = 0;
  for (auto CC : CallSeqVector) {
    // Call sites where no parameters are passed on the stack
    // do not affect the cost, since there needs to be no
    // stack adjustment.
    if (CC.NoStackParams)
      continue;

    if (!CC.UsePush) {
      // If we don't use pushes for a particular call site,
      // we pay for not having a reserved call frame with an
      // additional sub/add esp pair. The cost is ~3 bytes per instruction,
      // depending on the size of the constant.
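      // (Illustrative sketch, not from the original comments: the pair is
      // e.g. "subl $16, %esp" / "addl $16, %esp", which encode in 3 bytes
      // each with an 8-bit immediate and 6 bytes each with a 32-bit one,
      // hence the flat 6-byte penalty charged below.)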
      // TODO: Callee-pop functions should have a smaller penalty, because
      // an add is needed even with a reserved call frame.
      Advantage -= 6;
    } else {
      // We can use pushes. First, account for the fixed costs.
      // We'll need an add after the call.
      Advantage -= 3;
      // If we have to realign the stack, we'll also need a sub before.
      if (CC.ExpectedDist % StackAlign)
        Advantage -= 3;
      // Now, for each push, we save ~3 bytes. For small constants, we actually
      // save more (up to 5 bytes), but 3 should be a good approximation.
      Advantage += (CC.ExpectedDist >> Log2SlotSize) * 3;
    }
  }

  return Advantage >= 0;
}

bool X86CallFrameOptimization::runOnMachineFunction(MachineFunction &MF) {
  STI = &MF.getSubtarget<X86Subtarget>();
  TII = STI->getInstrInfo();
  TFL = STI->getFrameLowering();
  MRI = &MF.getRegInfo();

  const X86RegisterInfo &RegInfo =
      *static_cast<const X86RegisterInfo *>(STI->getRegisterInfo());
  SlotSize = RegInfo.getSlotSize();
  assert(isPowerOf2_32(SlotSize) && "Expect power of 2 stack slot size");
  Log2SlotSize = Log2_32(SlotSize);

  if (skipFunction(*MF.getFunction()) || !isLegal(MF))
    return false;

  unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();

  bool Changed = false;

  ContextVector CallSeqVector;

  for (auto &MBB : MF)
    for (auto &MI : MBB)
      if (MI.getOpcode() == FrameSetupOpcode) {
        CallContext Context;
        collectCallInfo(MF, MBB, MI, Context);
        CallSeqVector.push_back(Context);
      }

  if (!isProfitable(MF, CallSeqVector))
    return false;

  for (auto CC : CallSeqVector) {
    if (CC.UsePush) {
      adjustCallSequence(MF, CC);
      Changed = true;
    }
  }

  return Changed;
}

X86CallFrameOptimization::InstClassification
X86CallFrameOptimization::classifyInstruction(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    const X86RegisterInfo &RegInfo, DenseSet<unsigned int> &UsedRegs) {
  if (MI == MBB.end())
    return Exit;

  // The instructions we actually care about are movs onto the stack
  int Opcode = MI->getOpcode();
  if (Opcode == X86::MOV32mi || Opcode == X86::MOV32mr ||
      Opcode == X86::MOV64mi32 || Opcode == X86::MOV64mr)
    return Convert;

  // Not all calling conventions have only stack MOVs between the stack
  // adjust and the call.

  // We want to tolerate other instructions, to cover more cases.
  // In particular:
  // a) PCrel calls, where we expect an additional COPY of the basereg.
  // b) Passing frame-index addresses.
  // c) Calling conventions that have inreg parameters. These generate
  //    both copies and movs into registers.
  // To avoid creating lots of special cases, allow any instruction
  // that does not write into memory, does not def or use the stack
  // pointer, and does not def any register that was used by a preceding
  // push.
  // (Reading from memory is allowed, even if referenced through a
  // frame index, since these will get adjusted properly in PEI.)

  // The reason for the last condition is that the pushes can't replace
  // the movs in place, because the order must be reversed.
  // So if we have a MOV32mr that uses EDX, then an instruction that defs
  // EDX, and then the call, after the transformation the push will use
  // the modified version of EDX, and not the original one.
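  // A hypothetical sketch of that hazard (not from the original comments):
  //   movl %edx, (%esp)   # candidate for conversion to a push
  //   movl $0, %edx       # defs EDX before the call
  //   call foo
  // The pushes are emitted immediately before the call, so the converted
  // push would read the clobbered EDX rather than the value the mov stored.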
  // Since we are still in SSA form at this point, we only need to
  // make sure we don't clobber any *physical* registers that were
  // used by an earlier mov that will become a push.

  if (MI->isCall() || MI->mayStore())
    return Exit;

  for (const MachineOperand &MO : MI->operands()) {
    if (!MO.isReg())
      continue;
    unsigned int Reg = MO.getReg();
    if (!RegInfo.isPhysicalRegister(Reg))
      continue;
    if (RegInfo.regsOverlap(Reg, RegInfo.getStackRegister()))
      return Exit;
    if (MO.isDef()) {
      for (unsigned int U : UsedRegs)
        if (RegInfo.regsOverlap(Reg, U))
          return Exit;
    }
  }

  return Skip;
}

void X86CallFrameOptimization::collectCallInfo(MachineFunction &MF,
                                               MachineBasicBlock &MBB,
                                               MachineBasicBlock::iterator I,
                                               CallContext &Context) {
  // Check that this particular call sequence is amenable to the
  // transformation.
  const X86RegisterInfo &RegInfo =
      *static_cast<const X86RegisterInfo *>(STI->getRegisterInfo());
  unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();

  // We expect to enter this at the beginning of a call sequence
  assert(I->getOpcode() == TII->getCallFrameSetupOpcode());
  MachineBasicBlock::iterator FrameSetup = I++;
  Context.FrameSetup = FrameSetup;

  // How much do we adjust the stack? This puts an upper bound on
  // the number of parameters actually passed on it.
  unsigned int MaxAdjust = FrameSetup->getOperand(0).getImm() >> Log2SlotSize;

  // A zero adjustment means no stack parameters
  if (!MaxAdjust) {
    Context.NoStackParams = true;
    return;
  }

  // Skip over DEBUG_VALUE.
  // For globals in PIC mode, we can have some LEAs here. Skip them as well.
  // TODO: Extend this to something that covers more cases.
  while (I->getOpcode() == X86::LEA32r || I->isDebugValue())
    ++I;

  unsigned StackPtr = RegInfo.getStackRegister();
  // SelectionDAG (but not FastISel) inserts a copy of ESP into a virtual
  // register here. If it's there, use that virtual register as stack pointer
  // instead.
  if (I->isCopy() && I->getOperand(0).isReg() && I->getOperand(1).isReg() &&
      I->getOperand(1).getReg() == StackPtr) {
    Context.SPCopy = &*I++;
    StackPtr = Context.SPCopy->getOperand(0).getReg();
  }

  // Scan the call setup sequence for the pattern we're looking for.
  // We only handle a simple case - a sequence of store instructions that
  // push a sequence of stack-slot-aligned values onto the stack, with
  // no gaps between them.
  if (MaxAdjust > 4)
    Context.MovVector.resize(MaxAdjust, nullptr);

  InstClassification Classification;
  DenseSet<unsigned int> UsedRegs;

  while ((Classification = classifyInstruction(MBB, I, RegInfo, UsedRegs)) !=
         Exit) {
    if (Classification == Skip) {
      ++I;
      continue;
    }

    // We know the instruction has a supported store opcode.
    // We only want movs of the form:
    // mov imm/reg, k(%StackPtr)
    // If we run into something else, bail.
    // Note that AddrBaseReg may, counter to its name, not be a register,
    // but rather a frame index.
    // TODO: Support the fi case. This should probably work now that we
    // have the infrastructure to track the stack pointer within a call
    // sequence.
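    // An illustrative example of the accepted shape (assuming a 32-bit
    // target with a 4-byte slot size):
    //   movl $42,    (%esp)
    //   movl %eax,  4(%esp)
    //   movl %ecx,  8(%esp)
    // i.e. base == StackPtr, scale 1, no index or segment register, and an
    // immediate, slot-aligned displacement.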
    if (!I->getOperand(X86::AddrBaseReg).isReg() ||
        (I->getOperand(X86::AddrBaseReg).getReg() != StackPtr) ||
        !I->getOperand(X86::AddrScaleAmt).isImm() ||
        (I->getOperand(X86::AddrScaleAmt).getImm() != 1) ||
        (I->getOperand(X86::AddrIndexReg).getReg() != X86::NoRegister) ||
        (I->getOperand(X86::AddrSegmentReg).getReg() != X86::NoRegister) ||
        !I->getOperand(X86::AddrDisp).isImm())
      return;

    int64_t StackDisp = I->getOperand(X86::AddrDisp).getImm();
    assert(StackDisp >= 0 &&
           "Negative stack displacement when passing parameters");

    // We really don't want to consider the unaligned case.
    if (StackDisp & (SlotSize - 1))
      return;
    StackDisp >>= Log2SlotSize;

    assert((size_t)StackDisp < Context.MovVector.size() &&
           "Function call has more parameters than the stack is adjusted for.");

    // If the same stack slot is being filled twice, something's fishy.
    if (Context.MovVector[StackDisp] != nullptr)
      return;
    Context.MovVector[StackDisp] = &*I;

    for (const MachineOperand &MO : I->uses()) {
      if (!MO.isReg())
        continue;
      unsigned int Reg = MO.getReg();
      if (RegInfo.isPhysicalRegister(Reg))
        UsedRegs.insert(Reg);
    }

    ++I;
  }

  // We now expect the end of the sequence. If we stopped early,
  // or reached the end of the block without finding a call, bail.
  if (I == MBB.end() || !I->isCall())
    return;

  Context.Call = &*I;
  if ((++I)->getOpcode() != FrameDestroyOpcode)
    return;

  // Now, go through the vector, and see that we don't have any gaps,
  // but only a series of MOVs.
  auto MMI = Context.MovVector.begin(), MME = Context.MovVector.end();
  for (; MMI != MME; ++MMI, Context.ExpectedDist += SlotSize)
    if (*MMI == nullptr)
      break;

  // If the call had no parameters, do nothing
  if (MMI == Context.MovVector.begin())
    return;

  // We are either at the last parameter, or a gap.
  // Make sure it's not a gap.
  for (; MMI != MME; ++MMI)
    if (*MMI != nullptr)
      return;

  Context.UsePush = true;
}

void X86CallFrameOptimization::adjustCallSequence(MachineFunction &MF,
                                                  const CallContext &Context) {
  // Ok, we can in fact do the transformation for this call.
  // Do not remove the FrameSetup instruction, but adjust the parameters.
  // PEI will end up finalizing the handling of this.
  MachineBasicBlock::iterator FrameSetup = Context.FrameSetup;
  MachineBasicBlock &MBB = *(FrameSetup->getParent());
  FrameSetup->getOperand(1).setImm(Context.ExpectedDist);

  DebugLoc DL = FrameSetup->getDebugLoc();
  bool Is64Bit = STI->is64Bit();
  // Now, iterate through the vector in reverse order, and replace the movs
  // with pushes. MOVmi/MOVmr doesn't have any defs, so no need to
  // replace uses.
  for (int Idx = (Context.ExpectedDist >> Log2SlotSize) - 1; Idx >= 0; --Idx) {
    MachineBasicBlock::iterator MOV = *Context.MovVector[Idx];
    MachineOperand PushOp = MOV->getOperand(X86::AddrNumOperands);
    MachineBasicBlock::iterator Push = nullptr;
    unsigned PushOpcode;
    switch (MOV->getOpcode()) {
    default:
      llvm_unreachable("Unexpected Opcode!");
    case X86::MOV32mi:
    case X86::MOV64mi32:
      PushOpcode = Is64Bit ? X86::PUSH64i32 : X86::PUSHi32;
      // If the operand is a small (8-bit) immediate, we can use a
      // PUSH instruction with a shorter encoding.
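      // (For illustration: "push $1" encodes in 2 bytes with an 8-bit
      // immediate versus 5 bytes with a 32-bit one.)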
      // Note that isImm() may fail even though this is a MOVmi, because
      // the operand can also be a symbol.
      if (PushOp.isImm()) {
        int64_t Val = PushOp.getImm();
        if (isInt<8>(Val))
          PushOpcode = Is64Bit ? X86::PUSH64i8 : X86::PUSH32i8;
      }
      Push = BuildMI(MBB, Context.Call, DL, TII->get(PushOpcode))
                 .addOperand(PushOp);
      break;
    case X86::MOV32mr:
    case X86::MOV64mr:
      unsigned int Reg = PushOp.getReg();

      // If storing a 32-bit vreg on 64-bit targets, extend to a 64-bit vreg
      // in preparation for the PUSH64. The upper 32 bits can be undef.
      if (Is64Bit && MOV->getOpcode() == X86::MOV32mr) {
        unsigned UndefReg = MRI->createVirtualRegister(&X86::GR64RegClass);
        Reg = MRI->createVirtualRegister(&X86::GR64RegClass);
        BuildMI(MBB, Context.Call, DL, TII->get(X86::IMPLICIT_DEF), UndefReg);
        BuildMI(MBB, Context.Call, DL, TII->get(X86::INSERT_SUBREG), Reg)
            .addReg(UndefReg)
            .addOperand(PushOp)
            .addImm(X86::sub_32bit);
      }

      // If PUSHrmm is not slow on this target, try to fold the source of the
      // push into the instruction.
      bool SlowPUSHrmm = STI->isAtom() || STI->isSLM();

      // Check that this is legal to fold. Right now, we're extremely
      // conservative about that.
      MachineInstr *DefMov = nullptr;
      if (!SlowPUSHrmm && (DefMov = canFoldIntoRegPush(FrameSetup, Reg))) {
        PushOpcode = Is64Bit ? X86::PUSH64rmm : X86::PUSH32rmm;
        Push = BuildMI(MBB, Context.Call, DL, TII->get(PushOpcode));

        unsigned NumOps = DefMov->getDesc().getNumOperands();
        for (unsigned i = NumOps - X86::AddrNumOperands; i != NumOps; ++i)
          Push->addOperand(DefMov->getOperand(i));

        DefMov->eraseFromParent();
      } else {
        PushOpcode = Is64Bit ? X86::PUSH64r : X86::PUSH32r;
        Push = BuildMI(MBB, Context.Call, DL, TII->get(PushOpcode))
                   .addReg(Reg)
                   .getInstr();
      }
      break;
    }

    // For debugging, when using SP-based CFA, we need to adjust the CFA
    // offset after each push.
    // TODO: This is needed only if we require precise CFA.
    if (!TFL->hasFP(MF))
      TFL->BuildCFI(
          MBB, std::next(Push), DL,
          MCCFIInstruction::createAdjustCfaOffset(nullptr, SlotSize));

    MBB.erase(MOV);
  }

  // The stack-pointer copy is no longer used in the call sequences.
  // There should not be any other users, but we can't commit to that, so:
  if (Context.SPCopy && MRI->use_empty(Context.SPCopy->getOperand(0).getReg()))
    Context.SPCopy->eraseFromParent();

  // Once we've done this, we need to make sure PEI doesn't assume a reserved
  // frame.
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
  FuncInfo->setHasPushSequences(true);
}

MachineInstr *X86CallFrameOptimization::canFoldIntoRegPush(
    MachineBasicBlock::iterator FrameSetup, unsigned Reg) {
  // Do an extremely restricted form of load folding.
  // ISel will often create patterns like:
  // movl 4(%edi), %eax
  // movl 8(%edi), %ecx
  // movl 12(%edi), %edx
  // movl %edx, 8(%esp)
  // movl %ecx, 4(%esp)
  // movl %eax, (%esp)
  // call
  // Get rid of those with prejudice.
  if (!TargetRegisterInfo::isVirtualRegister(Reg))
    return nullptr;

  // Make sure this is the only use of Reg.
  if (!MRI->hasOneNonDBGUse(Reg))
    return nullptr;

  MachineInstr &DefMI = *MRI->getVRegDef(Reg);

  // Make sure the def is a MOV from memory.
  // If the def is in another block, give up.
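  // (When the fold does go through, adjustCallSequence emits a memory push
  // such as "pushl 4(%edi)" instead of a register push; illustrative
  // example only, not from the original comments.)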
  if ((DefMI.getOpcode() != X86::MOV32rm &&
       DefMI.getOpcode() != X86::MOV64rm) ||
      DefMI.getParent() != FrameSetup->getParent())
    return nullptr;

  // Make sure we don't have any instructions between DefMI and the
  // push that make folding the load illegal.
  for (MachineBasicBlock::iterator I = DefMI; I != FrameSetup; ++I)
    if (I->isLoadFoldBarrier())
      return nullptr;

  return &DefMI;
}