//===-- X86FrameLowering.cpp - X86 Frame Information ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of TargetFrameLowering class.
//
//===----------------------------------------------------------------------===//

#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetOptions.h"
#include <cstdlib>

#define DEBUG_TYPE "x86-fl"

STATISTIC(NumFrameLoopProbe, "Number of loop stack probes used in prologue");
STATISTIC(NumFrameExtraProbe,
          "Number of extra stack probes generated in prologue");

using namespace llvm;

X86FrameLowering::X86FrameLowering(const X86Subtarget &STI,
                                   MaybeAlign StackAlignOverride)
    : TargetFrameLowering(StackGrowsDown, StackAlignOverride.valueOrOne(),
                          STI.is64Bit() ? -8 : -4),
      STI(STI), TII(*STI.getInstrInfo()), TRI(STI.getRegisterInfo()) {
  // Cache a bunch of frame-related predicates for this subtarget.
  SlotSize = TRI->getSlotSize();
  Is64Bit = STI.is64Bit();
  IsLP64 = STI.isTarget64BitLP64();
  // Standard x86_64 and NaCl use 64-bit frame/stack pointers; x32 uses 32-bit.
  Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64();
  StackPtr = TRI->getStackRegister();
}

bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  return !MF.getFrameInfo().hasVarSizedObjects() &&
         !MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences() &&
         !MF.getInfo<X86MachineFunctionInfo>()->hasPreallocatedCall();
}

/// canSimplifyCallFramePseudos - If there is a reserved call frame, the
/// call frame pseudos can be simplified. Having a FP, as in the default
/// implementation, is not sufficient here since we can't always use it.
/// Use a more nuanced condition.
bool
X86FrameLowering::canSimplifyCallFramePseudos(const MachineFunction &MF) const {
  return hasReservedCallFrame(MF) ||
         MF.getInfo<X86MachineFunctionInfo>()->hasPreallocatedCall() ||
         (hasFP(MF) && !TRI->hasStackRealignment(MF)) ||
         TRI->hasBasePointer(MF);
}

// needsFrameIndexResolution - Do we need to perform FI resolution for
// this function? Normally, this is required only when the function
// has any stack objects. However, FI resolution actually has another job,
// not apparent from the title - it resolves callframesetup/destroy
// that were not simplified earlier.
// So, this is required for x86 functions that have push sequences even
// when there are no stack objects.
bool
X86FrameLowering::needsFrameIndexResolution(const MachineFunction &MF) const {
  return MF.getFrameInfo().hasStackObjects() ||
         MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences();
}

/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register. This is true if the function has variable sized allocas
/// or if frame pointer elimination is disabled.
bool X86FrameLowering::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
          TRI->hasStackRealignment(MF) || MFI.hasVarSizedObjects() ||
          MFI.isFrameAddressTaken() || MFI.hasOpaqueSPAdjustment() ||
          MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
          MF.getInfo<X86MachineFunctionInfo>()->hasPreallocatedCall() ||
          MF.callsUnwindInit() || MF.hasEHFunclets() || MF.callsEHReturn() ||
          MFI.hasStackMap() || MFI.hasPatchPoint() ||
          MFI.hasCopyImplyingStackAdjustment());
}

static unsigned getSUBriOpcode(bool IsLP64, int64_t Imm) {
  if (IsLP64) {
    if (isInt<8>(Imm))
      return X86::SUB64ri8;
    return X86::SUB64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::SUB32ri8;
    return X86::SUB32ri;
  }
}

static unsigned getADDriOpcode(bool IsLP64, int64_t Imm) {
  if (IsLP64) {
    if (isInt<8>(Imm))
      return X86::ADD64ri8;
    return X86::ADD64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::ADD32ri8;
    return X86::ADD32ri;
  }
}

static unsigned getSUBrrOpcode(bool IsLP64) {
  return IsLP64 ? X86::SUB64rr : X86::SUB32rr;
}

static unsigned getADDrrOpcode(bool IsLP64) {
  return IsLP64 ? X86::ADD64rr : X86::ADD32rr;
}

static unsigned getANDriOpcode(bool IsLP64, int64_t Imm) {
  if (IsLP64) {
    if (isInt<8>(Imm))
      return X86::AND64ri8;
    return X86::AND64ri32;
  }
  if (isInt<8>(Imm))
    return X86::AND32ri8;
  return X86::AND32ri;
}

static unsigned getLEArOpcode(bool IsLP64) {
  return IsLP64 ? X86::LEA64r : X86::LEA32r;
}

static bool isEAXLiveIn(MachineBasicBlock &MBB) {
  for (MachineBasicBlock::RegisterMaskPair RegMask : MBB.liveins()) {
    unsigned Reg = RegMask.PhysReg;

    if (Reg == X86::RAX || Reg == X86::EAX || Reg == X86::AX ||
        Reg == X86::AH || Reg == X86::AL)
      return true;
  }

  return false;
}

/// Check if the flags need to be preserved before the terminators.
/// This would be the case if EFLAGS is live-in to the region composed
/// of the terminators, or live-out of that region without being defined
/// by a terminator.
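/// For illustration, a block whose terminator sequence is
///   JCC_1 %bb.1, <cond>, implicit $eflags
///   JMP_1 %bb.2
/// reads EFLAGS in its first terminator without any terminator defining it
/// first, so EFLAGS is live-in to the terminator region and must be
/// preserved by whatever gets inserted before the terminators.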
static bool
flagsNeedToBePreservedBeforeTheTerminators(const MachineBasicBlock &MBB) {
  for (const MachineInstr &MI : MBB.terminators()) {
    bool BreakNext = false;
    for (const MachineOperand &MO : MI.operands()) {
      if (!MO.isReg())
        continue;
      Register Reg = MO.getReg();
      if (Reg != X86::EFLAGS)
        continue;

      // This terminator needs an eflags that is not defined
      // by a previous terminator:
      // EFLAGS is live-in of the region composed by the terminators.
      if (!MO.isDef())
        return true;
      // This terminator defines the eflags, i.e., we don't need to preserve
      // it. However, we still need to check this specific terminator does not
      // read a live-in value.
      BreakNext = true;
    }
    // We found a definition of the eflags, no need to preserve them.
    if (BreakNext)
      return false;
  }

  // None of the terminators use or define the eflags.
  // Check if they are live-out, which would imply we need to preserve them.
  for (const MachineBasicBlock *Succ : MBB.successors())
    if (Succ->isLiveIn(X86::EFLAGS))
      return true;

  return false;
}

/// emitSPUpdate - Emit a series of instructions to increment / decrement the
/// stack pointer by a constant value.
void X86FrameLowering::emitSPUpdate(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator &MBBI,
                                    const DebugLoc &DL, int64_t NumBytes,
                                    bool InEpilogue) const {
  bool isSub = NumBytes < 0;
  uint64_t Offset = isSub ? -NumBytes : NumBytes;
  MachineInstr::MIFlag Flag =
      isSub ? MachineInstr::FrameSetup : MachineInstr::FrameDestroy;

  uint64_t Chunk = (1LL << 31) - 1;

  MachineFunction &MF = *MBB.getParent();
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  const X86TargetLowering &TLI = *STI.getTargetLowering();
  const bool EmitInlineStackProbe = TLI.hasInlineStackProbe(MF);

  // It's ok to not take into account large chunks when probing, as the
  // allocation is split in smaller chunks anyway.
  if (EmitInlineStackProbe && !InEpilogue) {

    // This pseudo-instruction is going to be expanded, potentially using a
    // loop, by inlineStackProbe().
    BuildMI(MBB, MBBI, DL, TII.get(X86::STACKALLOC_W_PROBING)).addImm(Offset);
    return;
  } else if (Offset > Chunk) {
    // Rather than emit a long series of instructions for large offsets,
    // load the offset into a register and do one sub/add.
    unsigned Reg = 0;
    unsigned Rax = (unsigned)(Is64Bit ? X86::RAX : X86::EAX);

    if (isSub && !isEAXLiveIn(MBB))
      Reg = Rax;
    else
      Reg = TRI->findDeadCallerSavedReg(MBB, MBBI);

    unsigned MovRIOpc = Is64Bit ? X86::MOV64ri : X86::MOV32ri;
    unsigned AddSubRROpc =
        isSub ? getSUBrrOpcode(Is64Bit) : getADDrrOpcode(Is64Bit);
    if (Reg) {
      BuildMI(MBB, MBBI, DL, TII.get(MovRIOpc), Reg)
          .addImm(Offset)
          .setMIFlag(Flag);
      MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(AddSubRROpc), StackPtr)
                             .addReg(StackPtr)
                             .addReg(Reg);
      MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
      return;
    } else if (Offset > 8 * Chunk) {
      // If we would need more than 8 add or sub instructions (a >16GB stack
      // frame), it's worth spilling RAX to materialize this immediate.
      //   pushq %rax
      //   movabsq +-$Offset+-SlotSize, %rax
      //   addq %rsp, %rax
      //   xchg %rax, (%rsp)
      //   movq (%rsp), %rsp
      assert(Is64Bit && "can't have 32-bit 16GB stack frame");
      BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64r))
          .addReg(Rax, RegState::Kill)
          .setMIFlag(Flag);
      // Subtract is not commutative, so negate the offset and always use add.
      // Subtract 8 less and add 8 more to account for the PUSH we just did.
      if (isSub)
        Offset = -(Offset - SlotSize);
      else
        Offset = Offset + SlotSize;
      BuildMI(MBB, MBBI, DL, TII.get(MovRIOpc), Rax)
          .addImm(Offset)
          .setMIFlag(Flag);
      MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(X86::ADD64rr), Rax)
                             .addReg(Rax)
                             .addReg(StackPtr);
      MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
      // Exchange the new SP in RAX with the top of the stack.
      addRegOffset(
          BuildMI(MBB, MBBI, DL, TII.get(X86::XCHG64rm), Rax).addReg(Rax),
          StackPtr, false, 0);
      // Load new SP from the top of the stack into RSP.
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rm), StackPtr),
                   StackPtr, false, 0);
      return;
    }
  }

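  // For example, a slot-sized decrement on x86-64 (NumBytes == -8) is emitted
  // as a single "pushq %rax" instead of "subq $8, %rsp", which has a shorter
  // encoding; the loop below performs that substitution whenever a suitable
  // register is available.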
  while (Offset) {
    uint64_t ThisVal = std::min(Offset, Chunk);
    if (ThisVal == SlotSize) {
      // Use push / pop for slot sized adjustments as a size optimization. We
      // need to find a dead register when using pop.
      unsigned Reg = isSub ? (unsigned)(Is64Bit ? X86::RAX : X86::EAX)
                           : TRI->findDeadCallerSavedReg(MBB, MBBI);
      if (Reg) {
        unsigned Opc = isSub ? (Is64Bit ? X86::PUSH64r : X86::PUSH32r)
                             : (Is64Bit ? X86::POP64r : X86::POP32r);
        BuildMI(MBB, MBBI, DL, TII.get(Opc))
            .addReg(Reg, getDefRegState(!isSub) | getUndefRegState(isSub))
            .setMIFlag(Flag);
        Offset -= ThisVal;
        continue;
      }
    }

    BuildStackAdjustment(MBB, MBBI, DL, isSub ? -ThisVal : ThisVal, InEpilogue)
        .setMIFlag(Flag);

    Offset -= ThisVal;
  }
}

MachineInstrBuilder X86FrameLowering::BuildStackAdjustment(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    const DebugLoc &DL, int64_t Offset, bool InEpilogue) const {
  assert(Offset != 0 && "zero offset stack adjustment requested");

  // On Atom, using LEA to adjust SP is preferred, but using it in the epilogue
  // is tricky.
  bool UseLEA;
  if (!InEpilogue) {
    // Check if inserting the prologue at the beginning
    // of MBB would require to use LEA operations.
    // We need to use LEA operations if EFLAGS is live in, because
    // it means an instruction will read it before it gets defined.
    UseLEA = STI.useLeaForSP() || MBB.isLiveIn(X86::EFLAGS);
  } else {
    // If we can use LEA for SP but we shouldn't, check that none
    // of the terminators uses the eflags. Otherwise we will insert
    // an ADD that will redefine the eflags and break the condition.
    // Alternatively, we could move the ADD, but this may not be possible
    // and is an optimization anyway.
    UseLEA = canUseLEAForSPInEpilogue(*MBB.getParent());
    if (UseLEA && !STI.useLeaForSP())
      UseLEA = flagsNeedToBePreservedBeforeTheTerminators(MBB);
    // If that assert breaks, that means we do not do the right thing
    // in canUseAsEpilogue.
    assert((UseLEA || !flagsNeedToBePreservedBeforeTheTerminators(MBB)) &&
           "We shouldn't have allowed this insertion point");
  }

  MachineInstrBuilder MI;
  if (UseLEA) {
    MI = addRegOffset(BuildMI(MBB, MBBI, DL,
                              TII.get(getLEArOpcode(Uses64BitFramePtr)),
                              StackPtr),
                      StackPtr, false, Offset);
  } else {
    bool IsSub = Offset < 0;
    uint64_t AbsOffset = IsSub ? -Offset : Offset;
    const unsigned Opc = IsSub ? getSUBriOpcode(Uses64BitFramePtr, AbsOffset)
                               : getADDriOpcode(Uses64BitFramePtr, AbsOffset);
    MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
             .addReg(StackPtr)
             .addImm(AbsOffset);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
  }
  return MI;
}

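// For example, when called with doMergeWithPrevious == true and the prologue
// has just emitted
//   subq $16, %rsp
// immediately before MBBI, that SUB (and its single trailing CFI instruction,
// if any) is erased and -16 is returned, so the caller can fold those 16
// bytes into its own stack adjustment.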
int X86FrameLowering::mergeSPUpdates(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator &MBBI,
                                     bool doMergeWithPrevious) const {
  if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
      (!doMergeWithPrevious && MBBI == MBB.end()))
    return 0;

  MachineBasicBlock::iterator PI =
      doMergeWithPrevious ? std::prev(MBBI) : MBBI;

  PI = skipDebugInstructionsBackward(PI, MBB.begin());
  // It is assumed that the ADD/SUB/LEA instruction is succeeded by one CFI
  // instruction, and that there are no DBG_VALUE or other instructions between
  // ADD/SUB/LEA and its corresponding CFI instruction.
  /* TODO: Add support for the case where there are multiple CFI instructions
    below the ADD/SUB/LEA, e.g.:
    ...
    add
    cfi_def_cfa_offset
    cfi_offset
    ...
  */
  if (doMergeWithPrevious && PI != MBB.begin() && PI->isCFIInstruction())
    PI = std::prev(PI);

  unsigned Opc = PI->getOpcode();
  int Offset = 0;

  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 || Opc == X86::ADD32ri ||
       Opc == X86::ADD32ri8) &&
      PI->getOperand(0).getReg() == StackPtr) {
    assert(PI->getOperand(1).getReg() == StackPtr);
    Offset = PI->getOperand(2).getImm();
  } else if ((Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
             PI->getOperand(0).getReg() == StackPtr &&
             PI->getOperand(1).getReg() == StackPtr &&
             PI->getOperand(2).getImm() == 1 &&
             PI->getOperand(3).getReg() == X86::NoRegister &&
             PI->getOperand(5).getReg() == X86::NoRegister) {
    // For LEAs we have: def = lea SP, FI, noreg, Offset, noreg.
    Offset = PI->getOperand(4).getImm();
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    assert(PI->getOperand(1).getReg() == StackPtr);
    Offset = -PI->getOperand(2).getImm();
  } else
    return 0;

  PI = MBB.erase(PI);
  if (PI != MBB.end() && PI->isCFIInstruction()) {
    auto CIs = MBB.getParent()->getFrameInstructions();
    MCCFIInstruction CI = CIs[PI->getOperand(0).getCFIIndex()];
    if (CI.getOperation() == MCCFIInstruction::OpDefCfaOffset ||
        CI.getOperation() == MCCFIInstruction::OpAdjustCfaOffset)
      PI = MBB.erase(PI);
  }
  if (!doMergeWithPrevious)
    MBBI = skipDebugInstructionsForward(PI, MBB.end());

  return Offset;
}

void X86FrameLowering::BuildCFI(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MBBI,
                                const DebugLoc &DL,
                                const MCCFIInstruction &CFIInst) const {
  MachineFunction &MF = *MBB.getParent();
  unsigned CFIIndex = MF.addFrameInst(CFIInst);
  BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);
}

/// Emits Dwarf Info specifying offsets of callee saved registers and
/// frame pointer. This is called only when basic block sections are enabled.
void X86FrameLowering::emitCalleeSavedFrameMoves(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
  MachineFunction &MF = *MBB.getParent();
  if (!hasFP(MF)) {
    emitCalleeSavedFrameMoves(MBB, MBBI, DebugLoc{}, true);
    return;
  }
  const MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
  const Register FramePtr = TRI->getFrameRegister(MF);
  const Register MachineFramePtr =
      STI.isTarget64BitILP32() ? Register(getX86SubSuperRegister(FramePtr, 64))
                               : FramePtr;
  unsigned DwarfReg = MRI->getDwarfRegNum(MachineFramePtr, true);
  // Offset = space for return address + size of the frame pointer itself.
  unsigned Offset = (Is64Bit ? 8 : 4) + (Uses64BitFramePtr ? 8 : 4);
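  // For example, on x86-64 this is 8 (return address) + 8 (saved RBP) = 16,
  // producing ".cfi_offset %rbp, -16".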
  BuildCFI(MBB, MBBI, DebugLoc{},
           MCCFIInstruction::createOffset(nullptr, DwarfReg, -Offset));
  emitCalleeSavedFrameMoves(MBB, MBBI, DebugLoc{}, true);
}

void X86FrameLowering::emitCalleeSavedFrameMoves(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    const DebugLoc &DL, bool IsPrologue) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();

  // Add callee saved registers to move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();

  // Calculate offsets.
  for (const CalleeSavedInfo &I : CSI) {
    int64_t Offset = MFI.getObjectOffset(I.getFrameIdx());
    unsigned Reg = I.getReg();
    unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);

    if (IsPrologue) {
      BuildCFI(MBB, MBBI, DL,
               MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset));
    } else {
      BuildCFI(MBB, MBBI, DL,
               MCCFIInstruction::createRestore(nullptr, DwarfReg));
    }
  }
}

void X86FrameLowering::emitStackProbe(MachineFunction &MF,
                                      MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MBBI,
                                      const DebugLoc &DL, bool InProlog) const {
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  if (STI.isTargetWindowsCoreCLR()) {
    if (InProlog) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::STACKALLOC_W_PROBING))
          .addImm(0 /* no explicit stack size */);
    } else {
      emitStackProbeInline(MF, MBB, MBBI, DL, false);
    }
  } else {
    emitStackProbeCall(MF, MBB, MBBI, DL, InProlog);
  }
}

bool X86FrameLowering::stackProbeFunctionModifiesSP() const {
  return STI.isOSWindows() && !STI.isTargetWin64();
}

void X86FrameLowering::inlineStackProbe(MachineFunction &MF,
                                        MachineBasicBlock &PrologMBB) const {
  auto Where = llvm::find_if(PrologMBB, [](MachineInstr &MI) {
    return MI.getOpcode() == X86::STACKALLOC_W_PROBING;
  });
  if (Where != PrologMBB.end()) {
    DebugLoc DL = PrologMBB.findDebugLoc(Where);
    emitStackProbeInline(MF, PrologMBB, Where, DL, true);
    Where->eraseFromParent();
  }
}

void X86FrameLowering::emitStackProbeInline(MachineFunction &MF,
                                            MachineBasicBlock &MBB,
                                            MachineBasicBlock::iterator MBBI,
                                            const DebugLoc &DL,
                                            bool InProlog) const {
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  if (STI.isTargetWindowsCoreCLR() && STI.is64Bit())
    emitStackProbeInlineWindowsCoreCLR64(MF, MBB, MBBI, DL, InProlog);
  else
    emitStackProbeInlineGeneric(MF, MBB, MBBI, DL, InProlog);
}

void X86FrameLowering::emitStackProbeInlineGeneric(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog) const {
  MachineInstr &AllocWithProbe = *MBBI;
  uint64_t Offset = AllocWithProbe.getOperand(0).getImm();

  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  const X86TargetLowering &TLI = *STI.getTargetLowering();
  assert(!(STI.is64Bit() && STI.isTargetWindowsCoreCLR()) &&
         "different expansion expected for CoreCLR 64 bit");

  const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);
  uint64_t ProbeChunk = StackProbeSize * 8;

  uint64_t MaxAlign =
      TRI->hasStackRealignment(MF) ? calculateMaxStackAlign(MF) : 0;

  // Synthesize a loop or unroll it, depending on the number of iterations.
  // BuildStackAlignAND ensures that only MaxAlign % StackProbeSize bytes are
  // left between the unaligned rsp and current rsp.
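  // For instance, with the common 4096-byte probe size, allocations of up to
  // ProbeChunk = 32768 bytes are probed with an unrolled block, and anything
  // larger with a loop.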
  if (Offset > ProbeChunk) {
    emitStackProbeInlineGenericLoop(MF, MBB, MBBI, DL, Offset,
                                    MaxAlign % StackProbeSize);
  } else {
    emitStackProbeInlineGenericBlock(MF, MBB, MBBI, DL, Offset,
                                     MaxAlign % StackProbeSize);
  }
}

void X86FrameLowering::emitStackProbeInlineGenericBlock(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL, uint64_t Offset,
    uint64_t AlignOffset) const {

  const bool NeedsDwarfCFI = needsDwarfCFI(MF);
  const bool HasFP = hasFP(MF);
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  const X86TargetLowering &TLI = *STI.getTargetLowering();
  const unsigned Opc = getSUBriOpcode(Uses64BitFramePtr, Offset);
  const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;
  const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);

  uint64_t CurrentOffset = 0;

  assert(AlignOffset < StackProbeSize);

  // If the offset is so small it fits within a page, there's nothing to do.
  if (StackProbeSize < Offset + AlignOffset) {

    MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
                           .addReg(StackPtr)
                           .addImm(StackProbeSize - AlignOffset)
                           .setMIFlag(MachineInstr::FrameSetup);
    if (!HasFP && NeedsDwarfCFI) {
      BuildCFI(MBB, MBBI, DL,
               MCCFIInstruction::createAdjustCfaOffset(
                   nullptr, StackProbeSize - AlignOffset));
    }
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.

    addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MovMIOpc))
                     .setMIFlag(MachineInstr::FrameSetup),
                 StackPtr, false, 0)
        .addImm(0)
        .setMIFlag(MachineInstr::FrameSetup);
    NumFrameExtraProbe++;
    CurrentOffset = StackProbeSize - AlignOffset;
  }

  // For the next N - 1 pages, just probe. I tried to take advantage of
  // natural probes, but it implies much more logic and there were very few
  // interesting natural probes to interleave.
  while (CurrentOffset + StackProbeSize < Offset) {
    MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
                           .addReg(StackPtr)
                           .addImm(StackProbeSize)
                           .setMIFlag(MachineInstr::FrameSetup);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.

    if (!HasFP && NeedsDwarfCFI) {
      BuildCFI(
          MBB, MBBI, DL,
          MCCFIInstruction::createAdjustCfaOffset(nullptr, StackProbeSize));
    }
    addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MovMIOpc))
                     .setMIFlag(MachineInstr::FrameSetup),
                 StackPtr, false, 0)
        .addImm(0)
        .setMIFlag(MachineInstr::FrameSetup);
    NumFrameExtraProbe++;
    CurrentOffset += StackProbeSize;
  }

  // No need to probe the tail; it is smaller than a page.
  uint64_t ChunkSize = Offset - CurrentOffset;
  MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
                         .addReg(StackPtr)
                         .addImm(ChunkSize)
                         .setMIFlag(MachineInstr::FrameSetup);
  // No need to adjust the Dwarf CFA offset here; the last position of the
  // stack has been defined.
  MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
}
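
// A sketch of what the loop synthesized below looks like (block names match
// the locals; the optional AlignOffset pre-probe at the top of the function
// is elided):
//
//  MBB:
//    FinalStackProbed = SP - (Offset / StackProbeSize) * StackProbeSize
//  testMBB:
//    SP -= StackProbeSize
//    [SP] = 0                     ; touch the page
//    cmp SP, FinalStackProbed
//    jne testMBB
//  tailMBB:
//    SP -= Offset % StackProbeSize
//    [rest of original MBB]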
void X86FrameLowering::emitStackProbeInlineGenericLoop(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL, uint64_t Offset,
    uint64_t AlignOffset) const {
  assert(Offset && "null offset");

  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  const X86TargetLowering &TLI = *STI.getTargetLowering();
  const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;
  const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);

  if (AlignOffset) {
    if (AlignOffset < StackProbeSize) {
      // Perform a first smaller allocation followed by a probe.
      const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr, AlignOffset);
      MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(SUBOpc), StackPtr)
                             .addReg(StackPtr)
                             .addImm(AlignOffset)
                             .setMIFlag(MachineInstr::FrameSetup);
      MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.

      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MovMIOpc))
                       .setMIFlag(MachineInstr::FrameSetup),
                   StackPtr, false, 0)
          .addImm(0)
          .setMIFlag(MachineInstr::FrameSetup);
      NumFrameExtraProbe++;
      Offset -= AlignOffset;
    }
  }

  // Synthesize a loop
  NumFrameLoopProbe++;
  const BasicBlock *LLVM_BB = MBB.getBasicBlock();

  MachineBasicBlock *testMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *tailMBB = MF.CreateMachineBasicBlock(LLVM_BB);

  MachineFunction::iterator MBBIter = ++MBB.getIterator();
  MF.insert(MBBIter, testMBB);
  MF.insert(MBBIter, tailMBB);

  Register FinalStackProbed = Uses64BitFramePtr ? X86::R11
                              : Is64Bit         ? X86::R11D
                                                : X86::EAX;
  BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::COPY), FinalStackProbed)
      .addReg(StackPtr)
      .setMIFlag(MachineInstr::FrameSetup);

  // save loop bound
  {
    const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr, Offset);
    BuildMI(MBB, MBBI, DL, TII.get(SUBOpc), FinalStackProbed)
        .addReg(FinalStackProbed)
        .addImm(Offset / StackProbeSize * StackProbeSize)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // allocate a page
  {
    const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr, StackProbeSize);
    BuildMI(testMBB, DL, TII.get(SUBOpc), StackPtr)
        .addReg(StackPtr)
        .addImm(StackProbeSize)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // touch the page
  addRegOffset(BuildMI(testMBB, DL, TII.get(MovMIOpc))
                   .setMIFlag(MachineInstr::FrameSetup),
               StackPtr, false, 0)
      .addImm(0)
      .setMIFlag(MachineInstr::FrameSetup);

  // cmp with stack pointer bound
  BuildMI(testMBB, DL,
          TII.get(Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr))
      .addReg(StackPtr)
      .addReg(FinalStackProbed)
      .setMIFlag(MachineInstr::FrameSetup);

  // jump
  BuildMI(testMBB, DL, TII.get(X86::JCC_1))
      .addMBB(testMBB)
      .addImm(X86::COND_NE)
      .setMIFlag(MachineInstr::FrameSetup);
  testMBB->addSuccessor(testMBB);
  testMBB->addSuccessor(tailMBB);

  // BB management
  tailMBB->splice(tailMBB->end(), &MBB, MBBI, MBB.end());
  tailMBB->transferSuccessorsAndUpdatePHIs(&MBB);
  MBB.addSuccessor(testMBB);

  // handle tail
  unsigned TailOffset = Offset % StackProbeSize;
  if (TailOffset) {
    const unsigned Opc = getSUBriOpcode(Uses64BitFramePtr, TailOffset);
    BuildMI(*tailMBB, tailMBB->begin(), DL, TII.get(Opc), StackPtr)
        .addReg(StackPtr)
        .addImm(TailOffset)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // Update Live In information
  recomputeLiveIns(*testMBB);
  recomputeLiveIns(*tailMBB);
}

void X86FrameLowering::emitStackProbeInlineWindowsCoreCLR64(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog) const {
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  assert(STI.is64Bit() && "different expansion needed for 32 bit");
  assert(STI.isTargetWindowsCoreCLR() && "custom expansion expects CoreCLR");
  const TargetInstrInfo &TII = *STI.getInstrInfo();
  const BasicBlock *LLVM_BB = MBB.getBasicBlock();

  // RAX contains the number of bytes of desired stack adjustment.
  // The handling here assumes this value has already been updated so as to
  // maintain stack alignment.
  //
  // We need to exit with RSP modified by this amount and execute suitable
  // page touches to notify the OS that we're growing the stack responsibly.
  // All stack probing must be done without modifying RSP.
  //
  // MBB:
  //    SizeReg = RAX;
  //    ZeroReg = 0
  //    CopyReg = RSP
  //    Flags, TestReg = CopyReg - SizeReg
  //    FinalReg = !Flags.Ovf ? TestReg : ZeroReg
  //    LimitReg = gs magic thread env access
  //    if FinalReg >= LimitReg goto ContinueMBB
  // RoundBB:
  //    RoundReg = page address of FinalReg
  // LoopMBB:
  //    LoopReg = PHI(LimitReg,ProbeReg)
  //    ProbeReg = LoopReg - PageSize
  //    [ProbeReg] = 0
  //    if (ProbeReg > RoundReg) goto LoopMBB
  // ContinueMBB:
  //    RSP = RSP - RAX
  //    [rest of original MBB]

  // Set up the new basic blocks
  MachineBasicBlock *RoundMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *ContinueMBB = MF.CreateMachineBasicBlock(LLVM_BB);

  MachineFunction::iterator MBBIter = std::next(MBB.getIterator());
  MF.insert(MBBIter, RoundMBB);
  MF.insert(MBBIter, LoopMBB);
  MF.insert(MBBIter, ContinueMBB);

  // Split MBB and move the tail portion down to ContinueMBB.
  MachineBasicBlock::iterator BeforeMBBI = std::prev(MBBI);
  ContinueMBB->splice(ContinueMBB->begin(), &MBB, MBBI, MBB.end());
  ContinueMBB->transferSuccessorsAndUpdatePHIs(&MBB);

  // Some useful constants
  const int64_t ThreadEnvironmentStackLimit = 0x10;
  const int64_t PageSize = 0x1000;
  const int64_t PageMask = ~(PageSize - 1);

  // Registers we need. For the normal case we use virtual
  // registers. For the prolog expansion we use RAX, RCX and RDX.
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetRegisterClass *RegClass = &X86::GR64RegClass;
  const Register
      SizeReg = InProlog ? X86::RAX : MRI.createVirtualRegister(RegClass),
      ZeroReg = InProlog ? X86::RCX : MRI.createVirtualRegister(RegClass),
      CopyReg = InProlog ? X86::RDX : MRI.createVirtualRegister(RegClass),
      TestReg = InProlog ? X86::RDX : MRI.createVirtualRegister(RegClass),
      FinalReg = InProlog ? X86::RDX : MRI.createVirtualRegister(RegClass),
      RoundedReg = InProlog ? X86::RDX : MRI.createVirtualRegister(RegClass),
      LimitReg = InProlog ? X86::RCX : MRI.createVirtualRegister(RegClass),
      JoinReg = InProlog ? X86::RCX : MRI.createVirtualRegister(RegClass),
      ProbeReg = InProlog ? X86::RCX : MRI.createVirtualRegister(RegClass);

  // SP-relative offsets where we can save RCX and RDX.
  int64_t RCXShadowSlot = 0;
  int64_t RDXShadowSlot = 0;

  // If inlining in the prolog, save RCX and RDX.
  if (InProlog) {
    // Compute the offsets. We need to account for things already
    // pushed onto the stack at this point: return address, frame
    // pointer (if used), and callee saves.
    X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
    const int64_t CalleeSaveSize = X86FI->getCalleeSavedFrameSize();
    const bool HasFP = hasFP(MF);

    // Check if we need to spill RCX and/or RDX.
    // Here we assume that no earlier prologue instruction changes RCX and/or
    // RDX, so checking the block live-ins is enough.
    const bool IsRCXLiveIn = MBB.isLiveIn(X86::RCX);
    const bool IsRDXLiveIn = MBB.isLiveIn(X86::RDX);
    int64_t InitSlot = 8 + CalleeSaveSize + (HasFP ? 8 : 0);
    // Assign the initial slot to both registers, then change RDX's slot if
    // both need to be spilled.
    if (IsRCXLiveIn)
      RCXShadowSlot = InitSlot;
    if (IsRDXLiveIn)
      RDXShadowSlot = InitSlot;
    if (IsRDXLiveIn && IsRCXLiveIn)
      RDXShadowSlot += 8;
    // Emit the saves if needed.
    if (IsRCXLiveIn)
      addRegOffset(BuildMI(&MBB, DL, TII.get(X86::MOV64mr)), X86::RSP, false,
                   RCXShadowSlot)
          .addReg(X86::RCX);
    if (IsRDXLiveIn)
      addRegOffset(BuildMI(&MBB, DL, TII.get(X86::MOV64mr)), X86::RSP, false,
                   RDXShadowSlot)
          .addReg(X86::RDX);
  } else {
    // Not in the prolog. Copy RAX to a virtual reg.
    BuildMI(&MBB, DL, TII.get(X86::MOV64rr), SizeReg).addReg(X86::RAX);
  }

  // Add code to MBB to check for overflow and set the new target stack pointer
  // to zero if so.
  BuildMI(&MBB, DL, TII.get(X86::XOR64rr), ZeroReg)
      .addReg(ZeroReg, RegState::Undef)
      .addReg(ZeroReg, RegState::Undef);
  BuildMI(&MBB, DL, TII.get(X86::MOV64rr), CopyReg).addReg(X86::RSP);
  BuildMI(&MBB, DL, TII.get(X86::SUB64rr), TestReg)
      .addReg(CopyReg)
      .addReg(SizeReg);
  BuildMI(&MBB, DL, TII.get(X86::CMOV64rr), FinalReg)
      .addReg(TestReg)
      .addReg(ZeroReg)
      .addImm(X86::COND_B);

  // FinalReg now holds final stack pointer value, or zero if
  // allocation would overflow. Compare against the current stack
  // limit from the thread environment block. Note this limit is the
  // lowest touched page on the stack, not the point at which the OS
  // will cause an overflow exception, so this is just an optimization
  // to avoid unnecessarily touching pages that are below the current
  // SP but already committed to the stack by the OS.
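  // In assembly, the load below is "movq %gs:0x10, %LimitReg" -- offset 0x10
  // in the TEB is the StackLimit field on Win64.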
  BuildMI(&MBB, DL, TII.get(X86::MOV64rm), LimitReg)
      .addReg(0)
      .addImm(1)
      .addReg(0)
      .addImm(ThreadEnvironmentStackLimit)
      .addReg(X86::GS);
  BuildMI(&MBB, DL, TII.get(X86::CMP64rr)).addReg(FinalReg).addReg(LimitReg);
  // Jump if the desired stack pointer is at or above the stack limit.
  BuildMI(&MBB, DL, TII.get(X86::JCC_1))
      .addMBB(ContinueMBB)
      .addImm(X86::COND_AE);

  // Add code to roundMBB to round the final stack pointer to a page boundary.
  RoundMBB->addLiveIn(FinalReg);
  BuildMI(RoundMBB, DL, TII.get(X86::AND64ri32), RoundedReg)
      .addReg(FinalReg)
      .addImm(PageMask);
  BuildMI(RoundMBB, DL, TII.get(X86::JMP_1)).addMBB(LoopMBB);

  // LimitReg now holds the current stack limit, RoundedReg the page-rounded
  // final RSP value. Add code to loopMBB to decrement LimitReg page-by-page
  // and probe until we reach RoundedReg.
  if (!InProlog) {
    BuildMI(LoopMBB, DL, TII.get(X86::PHI), JoinReg)
        .addReg(LimitReg)
        .addMBB(RoundMBB)
        .addReg(ProbeReg)
        .addMBB(LoopMBB);
  }

  LoopMBB->addLiveIn(JoinReg);
  addRegOffset(BuildMI(LoopMBB, DL, TII.get(X86::LEA64r), ProbeReg), JoinReg,
               false, -PageSize);

  // Probe by storing a byte onto the stack.
  BuildMI(LoopMBB, DL, TII.get(X86::MOV8mi))
      .addReg(ProbeReg)
      .addImm(1)
      .addReg(0)
      .addImm(0)
      .addReg(0)
      .addImm(0);

  LoopMBB->addLiveIn(RoundedReg);
  BuildMI(LoopMBB, DL, TII.get(X86::CMP64rr))
      .addReg(RoundedReg)
      .addReg(ProbeReg);
  BuildMI(LoopMBB, DL, TII.get(X86::JCC_1))
      .addMBB(LoopMBB)
      .addImm(X86::COND_NE);

  MachineBasicBlock::iterator ContinueMBBI = ContinueMBB->getFirstNonPHI();

  // If in the prolog, restore RDX and RCX.
  if (InProlog) {
    if (RCXShadowSlot) // It means we spilled RCX in the prologue.
      addRegOffset(BuildMI(*ContinueMBB, ContinueMBBI, DL,
                           TII.get(X86::MOV64rm), X86::RCX),
                   X86::RSP, false, RCXShadowSlot);
    if (RDXShadowSlot) // It means we spilled RDX in the prologue.
      addRegOffset(BuildMI(*ContinueMBB, ContinueMBBI, DL,
                           TII.get(X86::MOV64rm), X86::RDX),
                   X86::RSP, false, RDXShadowSlot);
  }

  // Now that the probing is done, add code to continueMBB to update
  // the stack pointer for real.
  ContinueMBB->addLiveIn(SizeReg);
  BuildMI(*ContinueMBB, ContinueMBBI, DL, TII.get(X86::SUB64rr), X86::RSP)
      .addReg(X86::RSP)
      .addReg(SizeReg);

  // Add the control flow edges we need.
  MBB.addSuccessor(ContinueMBB);
  MBB.addSuccessor(RoundMBB);
  RoundMBB->addSuccessor(LoopMBB);
  LoopMBB->addSuccessor(ContinueMBB);
  LoopMBB->addSuccessor(LoopMBB);

  // Mark all the instructions added to the prolog as frame setup.
  if (InProlog) {
    for (++BeforeMBBI; BeforeMBBI != MBB.end(); ++BeforeMBBI) {
      BeforeMBBI->setFlag(MachineInstr::FrameSetup);
    }
    for (MachineInstr &MI : *RoundMBB) {
      MI.setFlag(MachineInstr::FrameSetup);
    }
    for (MachineInstr &MI : *LoopMBB) {
      MI.setFlag(MachineInstr::FrameSetup);
    }
    for (MachineBasicBlock::iterator CMBBI = ContinueMBB->begin();
         CMBBI != ContinueMBBI; ++CMBBI) {
      CMBBI->setFlag(MachineInstr::FrameSetup);
    }
  }
}

void X86FrameLowering::emitStackProbeCall(MachineFunction &MF,
                                          MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator MBBI,
                                          const DebugLoc &DL,
                                          bool InProlog) const {
  bool IsLargeCodeModel = MF.getTarget().getCodeModel() == CodeModel::Large;

  // FIXME: Add indirect thunk support and remove this.
  if (Is64Bit && IsLargeCodeModel && STI.useIndirectThunkCalls())
    report_fatal_error("Emitting stack probe calls on 64-bit with the large "
                       "code model and indirect thunks not yet implemented.");

  unsigned CallOp;
  if (Is64Bit)
    CallOp = IsLargeCodeModel ? X86::CALL64r : X86::CALL64pcrel32;
  else
    CallOp = X86::CALLpcrel32;

  StringRef Symbol = STI.getTargetLowering()->getStackProbeSymbolName(MF);

  MachineInstrBuilder CI;
  MachineBasicBlock::iterator ExpansionMBBI = std::prev(MBBI);

  // All current stack probes take AX and SP as input, clobber flags, and
  // preserve all registers. x86_64 probes leave RSP unmodified.
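  // For the large code model this expands to an indirect call, e.g.:
  //   movabsq $probe_symbol, %r11
  //   callq   *%r11
  // where probe_symbol is whatever getStackProbeSymbolName() returned (for
  // example __chkstk on Win64).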
  if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {
    // For the large code model, we have to call through a register. Use R11,
    // as it is scratch in all supported calling conventions.
    BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::R11)
        .addExternalSymbol(MF.createExternalSymbolName(Symbol));
    CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp)).addReg(X86::R11);
  } else {
    CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp))
             .addExternalSymbol(MF.createExternalSymbolName(Symbol));
  }

  unsigned AX = Uses64BitFramePtr ? X86::RAX : X86::EAX;
  unsigned SP = Uses64BitFramePtr ? X86::RSP : X86::ESP;
  CI.addReg(AX, RegState::Implicit)
      .addReg(SP, RegState::Implicit)
      .addReg(AX, RegState::Define | RegState::Implicit)
      .addReg(SP, RegState::Define | RegState::Implicit)
      .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);

  if (STI.isTargetWin64() || !STI.isOSWindows()) {
    // MSVC x32's _chkstk and cygwin/mingw's _alloca adjust %esp themselves.
    // MSVC x64's __chkstk and cygwin/mingw's ___chkstk_ms do not adjust %rsp
    // themselves. They also do not clobber %rax, so we can reuse it when
    // adjusting %rsp.
    // All other platforms do not specify a particular ABI for the stack probe
    // function, so we arbitrarily define it to not adjust %esp/%rsp itself.
    BuildMI(MBB, MBBI, DL, TII.get(getSUBrrOpcode(Uses64BitFramePtr)), SP)
        .addReg(SP)
        .addReg(AX);
  }

  if (InProlog) {
    // Apply the frame setup flag to all inserted instrs.
    for (++ExpansionMBBI; ExpansionMBBI != MBBI; ++ExpansionMBBI)
      ExpansionMBBI->setFlag(MachineInstr::FrameSetup);
  }
}
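
// For example, an SPAdjust of 344 is clamped to 128, which is already
// 16-byte aligned, so the frame pointer is established 128 bytes above RSP;
// an SPAdjust of 72 is rounded down to 64.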
static unsigned calculateSetFPREG(uint64_t SPAdjust) {
  // Win64 ABI has a less restrictive limitation of 240; 128 works equally well
  // and might require smaller successive adjustments.
  const uint64_t Win64MaxSEHOffset = 128;
  uint64_t SEHFrameOffset = std::min(SPAdjust, Win64MaxSEHOffset);
  // Win64 ABI requires 16-byte alignment for the UWOP_SET_FPREG opcode.
  return SEHFrameOffset & -16;
}

// If we're forcing a stack realignment we can't rely on just the frame
// info, we need to know the ABI stack alignment as well in case we
// have a call out. Otherwise just make sure we have some alignment - we'll
// go with the minimum SlotSize.
uint64_t
X86FrameLowering::calculateMaxStackAlign(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  Align MaxAlign = MFI.getMaxAlign(); // Desired stack alignment.
  Align StackAlign = getStackAlign();
  if (MF.getFunction().hasFnAttribute("stackrealign")) {
    if (MFI.hasCalls())
      MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
    else if (MaxAlign < SlotSize)
      MaxAlign = Align(SlotSize);
  }
  return MaxAlign.value();
}
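
// When inline stack probing is in effect, Reg is the stack pointer, and the
// requested alignment is at least one probe chunk (MaxAlign >=
// StackProbeSize), the plain AND below is instead expanded into a probed
// realignment. A sketch of the emitted control flow (block names match the
// locals):
//
//  entryMBB:
//    FinalStackProbed = SP & -MaxAlign
//    cmp FinalStackProbed, SP
//    je  MBB                          ; nothing to allocate
//  headMBB:
//    SP -= StackProbeSize
//    cmp FinalStackProbed, SP
//    jb  footMBB
//  bodyMBB:
//    [SP] = 0                         ; touch the page
//    SP -= StackProbeSize
//    cmp FinalStackProbed, SP
//    jb  bodyMBB
//  footMBB:
//    SP = FinalStackProbed
//    [SP] = 0                         ; final probe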
void X86FrameLowering::BuildStackAlignAND(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator MBBI,
                                          const DebugLoc &DL, unsigned Reg,
                                          uint64_t MaxAlign) const {
  uint64_t Val = -MaxAlign;
  unsigned AndOp = getANDriOpcode(Uses64BitFramePtr, Val);

  MachineFunction &MF = *MBB.getParent();
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  const X86TargetLowering &TLI = *STI.getTargetLowering();
  const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);
  const bool EmitInlineStackProbe = TLI.hasInlineStackProbe(MF);

  // We want to make sure that (in the worst case) less than StackProbeSize
  // bytes are left unprobed after the AND. This assumption is used in
  // emitStackProbeInlineGeneric.
  if (Reg == StackPtr && EmitInlineStackProbe && MaxAlign >= StackProbeSize) {
    {
      NumFrameLoopProbe++;
      MachineBasicBlock *entryMBB =
          MF.CreateMachineBasicBlock(MBB.getBasicBlock());
      MachineBasicBlock *headMBB =
          MF.CreateMachineBasicBlock(MBB.getBasicBlock());
      MachineBasicBlock *bodyMBB =
          MF.CreateMachineBasicBlock(MBB.getBasicBlock());
      MachineBasicBlock *footMBB =
          MF.CreateMachineBasicBlock(MBB.getBasicBlock());

      MachineFunction::iterator MBBIter = MBB.getIterator();
      MF.insert(MBBIter, entryMBB);
      MF.insert(MBBIter, headMBB);
      MF.insert(MBBIter, bodyMBB);
      MF.insert(MBBIter, footMBB);
      const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;
      Register FinalStackProbed = Uses64BitFramePtr ? X86::R11
                                  : Is64Bit         ? X86::R11D
                                                    : X86::EAX;

      // Setup entry block
      {

        entryMBB->splice(entryMBB->end(), &MBB, MBB.begin(), MBBI);
        BuildMI(entryMBB, DL, TII.get(TargetOpcode::COPY), FinalStackProbed)
            .addReg(StackPtr)
            .setMIFlag(MachineInstr::FrameSetup);
        MachineInstr *MI =
            BuildMI(entryMBB, DL, TII.get(AndOp), FinalStackProbed)
                .addReg(FinalStackProbed)
                .addImm(Val)
                .setMIFlag(MachineInstr::FrameSetup);

        // The EFLAGS implicit def is dead.
        MI->getOperand(3).setIsDead();

        BuildMI(entryMBB, DL,
                TII.get(Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr))
            .addReg(FinalStackProbed)
            .addReg(StackPtr)
            .setMIFlag(MachineInstr::FrameSetup);
        BuildMI(entryMBB, DL, TII.get(X86::JCC_1))
            .addMBB(&MBB)
            .addImm(X86::COND_E)
            .setMIFlag(MachineInstr::FrameSetup);
        entryMBB->addSuccessor(headMBB);
        entryMBB->addSuccessor(&MBB);
      }

      // Loop entry block

      {
        const unsigned SUBOpc =
            getSUBriOpcode(Uses64BitFramePtr, StackProbeSize);
        BuildMI(headMBB, DL, TII.get(SUBOpc), StackPtr)
            .addReg(StackPtr)
            .addImm(StackProbeSize)
            .setMIFlag(MachineInstr::FrameSetup);

        BuildMI(headMBB, DL,
                TII.get(Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr))
            .addReg(FinalStackProbed)
            .addReg(StackPtr)
            .setMIFlag(MachineInstr::FrameSetup);

        // jump
        BuildMI(headMBB, DL, TII.get(X86::JCC_1))
            .addMBB(footMBB)
            .addImm(X86::COND_B)
            .setMIFlag(MachineInstr::FrameSetup);

        headMBB->addSuccessor(bodyMBB);
        headMBB->addSuccessor(footMBB);
      }

      // setup loop body
      {
        addRegOffset(BuildMI(bodyMBB, DL, TII.get(MovMIOpc))
                         .setMIFlag(MachineInstr::FrameSetup),
                     StackPtr, false, 0)
            .addImm(0)
            .setMIFlag(MachineInstr::FrameSetup);

        const unsigned SUBOpc =
            getSUBriOpcode(Uses64BitFramePtr, StackProbeSize);
        BuildMI(bodyMBB, DL, TII.get(SUBOpc), StackPtr)
            .addReg(StackPtr)
            .addImm(StackProbeSize)
            .setMIFlag(MachineInstr::FrameSetup);

        // cmp with stack pointer bound
        BuildMI(bodyMBB, DL,
                TII.get(Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr))
            .addReg(FinalStackProbed)
            .addReg(StackPtr)
            .setMIFlag(MachineInstr::FrameSetup);

        // jump
        BuildMI(bodyMBB, DL, TII.get(X86::JCC_1))
            .addMBB(bodyMBB)
            .addImm(X86::COND_B)
            .setMIFlag(MachineInstr::FrameSetup);
        bodyMBB->addSuccessor(bodyMBB);
        bodyMBB->addSuccessor(footMBB);
      }

      // setup loop footer
      {
        BuildMI(footMBB, DL, TII.get(TargetOpcode::COPY), StackPtr)
            .addReg(FinalStackProbed)
            .setMIFlag(MachineInstr::FrameSetup);
        addRegOffset(BuildMI(footMBB, DL, TII.get(MovMIOpc))
                         .setMIFlag(MachineInstr::FrameSetup),
                     StackPtr, false, 0)
            .addImm(0)
            .setMIFlag(MachineInstr::FrameSetup);
        footMBB->addSuccessor(&MBB);
      }

      recomputeLiveIns(*headMBB);
      recomputeLiveIns(*bodyMBB);
      recomputeLiveIns(*footMBB);
      recomputeLiveIns(MBB);
    }
  } else {
    MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(AndOp), Reg)
                           .addReg(Reg)
                           .addImm(Val)
                           .setMIFlag(MachineInstr::FrameSetup);

    // The EFLAGS implicit def is dead.
    MI->getOperand(3).setIsDead();
  }
}

bool X86FrameLowering::has128ByteRedZone(const MachineFunction &MF) const {
  // x86-64 (non Win64) has a 128 byte red zone which is guaranteed not to be
  // clobbered by any interrupt handler.
  assert(&STI == &MF.getSubtarget<X86Subtarget>() &&
         "MF used frame lowering for wrong subtarget");
  const Function &Fn = MF.getFunction();
  const bool IsWin64CC = STI.isCallingConvWin64(Fn.getCallingConv());
  return Is64Bit && !IsWin64CC && !Fn.hasFnAttribute(Attribute::NoRedZone);
}

bool X86FrameLowering::isWin64Prologue(const MachineFunction &MF) const {
  return MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
}

bool X86FrameLowering::needsDwarfCFI(const MachineFunction &MF) const {
  return !isWin64Prologue(MF) && MF.needsFrameMoves();
}

/// emitPrologue - Push callee-saved registers onto the stack, which
/// automatically adjusts the stack pointer. Adjust the stack pointer to
/// allocate space for local variables. Also emit labels used by the exception
/// handler to generate the exception handling frames.

/*
  Here's a gist of what gets emitted:

  ; Establish frame pointer, if needed
  [if needs FP]
      push %rbp
      .cfi_def_cfa_offset 16
      .cfi_offset %rbp, -16
      .seh_pushreg %rbp
      mov %rsp, %rbp
      .cfi_def_cfa_register %rbp

  ; Spill general-purpose registers
  [for all callee-saved GPRs]
      pushq %<reg>
      [if not needs FP]
          .cfi_def_cfa_offset (offset from RETADDR)
      .seh_pushreg %<reg>

  ; If the required stack alignment > default stack alignment
  ; rsp needs to be re-aligned. This creates a "re-alignment gap"
  ; of unknown size in the stack frame.
  [if stack needs re-alignment]
      and $MASK, %rsp

  ; Allocate space for locals
  [if target is Windows and allocated space > 4096 bytes]
      ; Windows needs special care for allocations larger
      ; than one page.
      mov $NNN, %rax
      call ___chkstk_ms/___chkstk
      sub %rax, %rsp
  [else]
      sub $NNN, %rsp

  [if needs FP]
      .seh_stackalloc (size of XMM spill slots)
      .seh_setframe %rbp, SEHFrameOffset ; = size of all spill slots
  [else]
      .seh_stackalloc NNN

  ; Spill XMMs
  ; Note, that while only Windows 64 ABI specifies XMMs as callee-preserved,
  ; they may get spilled on any platform, if the current function
  ; calls @llvm.eh.unwind.init
  [if needs FP]
      [for all callee-saved XMM registers]
          movaps %<xmm reg>, -MMM(%rbp)
      [for all callee-saved XMM registers]
          .seh_savexmm %<xmm reg>, (-MMM + SEHFrameOffset)
          ; i.e. the offset relative to (%rbp - SEHFrameOffset)
  [else]
      [for all callee-saved XMM registers]
          movaps %<xmm reg>, KKK(%rsp)
      [for all callee-saved XMM registers]
          .seh_savexmm %<xmm reg>, KKK

  .seh_endprologue

  [if needs base pointer]
      mov %rsp, %rbx
      [if needs to restore base pointer]
          mov %rsp, -MMM(%rbp)

  ; Emit CFI info
  [if needs FP]
      [for all callee-saved registers]
          .cfi_offset %<reg>, (offset from %rbp)
  [else]
      .cfi_def_cfa_offset (offset from RETADDR)
      [for all callee-saved registers]
          .cfi_offset %<reg>, (offset from %rsp)

  Notes:
  - .seh directives are emitted only for Windows 64 ABI
  - .cv_fpo directives are emitted on win32 when emitting CodeView
  - .cfi directives are emitted for all other ABIs
  - for 32-bit code, substitute %e?? registers for %r??
*/

void X86FrameLowering::emitPrologue(MachineFunction &MF,
                                    MachineBasicBlock &MBB) const {
  assert(&STI == &MF.getSubtarget<X86Subtarget>() &&
         "MF used frame lowering for wrong subtarget");
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const Function &Fn = MF.getFunction();
  MachineModuleInfo &MMI = MF.getMMI();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  uint64_t MaxAlign = calculateMaxStackAlign(MF); // Desired stack alignment.
  uint64_t StackSize = MFI.getStackSize(); // Number of bytes to allocate.
  bool IsFunclet = MBB.isEHFuncletEntry();
  EHPersonality Personality = EHPersonality::Unknown;
  if (Fn.hasPersonalityFn())
    Personality = classifyEHPersonality(Fn.getPersonalityFn());
  bool FnHasClrFunclet =
      MF.hasEHFunclets() && Personality == EHPersonality::CoreCLR;
  bool IsClrFunclet = IsFunclet && FnHasClrFunclet;
  bool HasFP = hasFP(MF);
  bool IsWin64Prologue = isWin64Prologue(MF);
  bool NeedsWin64CFI = IsWin64Prologue && Fn.needsUnwindTableEntry();
  // FIXME: Emit FPO data for EH funclets.
  bool NeedsWinFPO =
      !IsFunclet && STI.isTargetWin32() && MMI.getModule()->getCodeViewFlag();
  bool NeedsWinCFI = NeedsWin64CFI || NeedsWinFPO;
  bool NeedsDwarfCFI = needsDwarfCFI(MF);
  Register FramePtr = TRI->getFrameRegister(MF);
  const Register MachineFramePtr =
      STI.isTarget64BitILP32() ? Register(getX86SubSuperRegister(FramePtr, 64))
                               : FramePtr;
  Register BasePtr = TRI->getBaseRegister();
  bool HasWinCFI = false;

  // Debug location must be unknown since the first debug location is used
  // to determine the end of the prologue.
  DebugLoc DL;

  // Space reserved for stack-based arguments when making a (ABI-guaranteed)
  // tail call.
  unsigned TailCallArgReserveSize = -X86FI->getTCReturnAddrDelta();
  if (TailCallArgReserveSize && IsWin64Prologue)
    report_fatal_error("Can't handle guaranteed tail call under win64 yet");

  const bool EmitStackProbeCall =
      STI.getTargetLowering()->hasStackProbeSymbol(MF);
  unsigned StackProbeSize = STI.getTargetLowering()->getStackProbeSize(MF);

  if (HasFP && X86FI->hasSwiftAsyncContext()) {
    switch (MF.getTarget().Options.SwiftAsyncFramePointer) {
    case SwiftAsyncFramePointerMode::DeploymentBased:
      if (STI.swiftAsyncContextIsDynamicallySet()) {
        // The special symbol below is absolute and has a *value* suitable to
        // be combined with the frame pointer directly.
        BuildMI(MBB, MBBI, DL, TII.get(X86::OR64rm), MachineFramePtr)
            .addUse(MachineFramePtr)
            .addUse(X86::RIP)
            .addImm(1)
            .addUse(X86::NoRegister)
            .addExternalSymbol("swift_async_extendedFramePointerFlags",
                               X86II::MO_GOTPCREL)
            .addUse(X86::NoRegister);
        break;
      }
      LLVM_FALLTHROUGH;

    case SwiftAsyncFramePointerMode::Always:
      BuildMI(MBB, MBBI, DL, TII.get(X86::BTS64ri8), MachineFramePtr)
          .addUse(MachineFramePtr)
          .addImm(60)
          .setMIFlag(MachineInstr::FrameSetup);
      break;

    case SwiftAsyncFramePointerMode::Never:
      break;
    }
  }

  // Re-align the stack on 64-bit if the x86-interrupt calling convention is
  // used and an error code was pushed, since the x86-64 ABI requires a 16-byte
  // stack alignment.
  if (Fn.getCallingConv() == CallingConv::X86_INTR && Is64Bit &&
      Fn.arg_size() == 2) {
    StackSize += 8;
    MFI.setStackSize(StackSize);
    emitSPUpdate(MBB, MBBI, DL, -8, /*InEpilogue=*/false);
  }

  // If this is x86-64 and the Red Zone is not disabled, if we are a leaf
  // function, and use up to 128 bytes of stack space, don't have a frame
  // pointer, calls, or dynamic alloca then we do not need to adjust the
  // stack pointer (we fit in the Red Zone). We also check that we don't
  // push and pop from the stack.
  if (has128ByteRedZone(MF) && !TRI->hasStackRealignment(MF) &&
      !MFI.hasVarSizedObjects() && // No dynamic alloca.
      !MFI.adjustsStack() &&       // No calls.
      !EmitStackProbeCall &&                   // No stack probes.
      !MFI.hasCopyImplyingStackAdjustment() && // Don't push and pop.
      !MF.shouldSplitStack()) {                // Regular stack
    uint64_t MinSize =
        X86FI->getCalleeSavedFrameSize() - X86FI->getTCReturnAddrDelta();
    if (HasFP)
      MinSize += SlotSize;
    X86FI->setUsesRedZone(MinSize > 0 || StackSize > 0);
    StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
    MFI.setStackSize(StackSize);
  }

  // Insert stack pointer adjustment for later moving of return addr. Only
  // applies to tail call optimized functions where the callee argument stack
  // size is bigger than the caller's.
  if (TailCallArgReserveSize != 0) {
    BuildStackAdjustment(MBB, MBBI, DL, -(int)TailCallArgReserveSize,
                         /*InEpilogue=*/false)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // Mapping for machine moves:
  //
  //   DST: VirtualFP AND
  //        SRC: VirtualFP              => DW_CFA_def_cfa_offset
  //        ELSE                        => DW_CFA_def_cfa
  //
  //   SRC: VirtualFP AND
  //        DST: Register               => DW_CFA_def_cfa_register
  //
  //   ELSE
  //        OFFSET < 0                  => DW_CFA_offset_extended_sf
  //        REG < 64                    => DW_CFA_offset + Reg
  //        ELSE                        => DW_CFA_offset_extended

  uint64_t NumBytes = 0;
  int stackGrowth = -SlotSize;

  // Find the funclet establisher parameter.
  Register Establisher = X86::NoRegister;
  if (IsClrFunclet)
    Establisher = Uses64BitFramePtr ? X86::RCX : X86::ECX;
  else if (IsFunclet)
    Establisher = Uses64BitFramePtr ? X86::RDX : X86::EDX;

  if (IsWin64Prologue && IsFunclet && !IsClrFunclet) {
    // Immediately spill establisher into the home slot.
    // The runtime cares about this.
    // MOV64mr %rdx, 16(%rsp)
    unsigned MOVmr = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
    addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MOVmr)), StackPtr, true, 16)
        .addReg(Establisher)
        .setMIFlag(MachineInstr::FrameSetup);
    MBB.addLiveIn(Establisher);
  }

  if (HasFP) {
    assert(MF.getRegInfo().isReserved(MachineFramePtr) && "FP reserved");

    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    // If required, include space for extra hidden slot for stashing base
    // pointer.
    if (X86FI->getRestoreBasePointer())
      FrameSize += SlotSize;

    NumBytes =
        FrameSize - (X86FI->getCalleeSavedFrameSize() + TailCallArgReserveSize);

    // Callee-saved registers are pushed on stack before the stack is
    // realigned.
    if (TRI->hasStackRealignment(MF) && !IsWin64Prologue)
      NumBytes = alignTo(NumBytes, MaxAlign);

    // Save EBP/RBP into the appropriate stack slot.
    BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
        .addReg(MachineFramePtr, RegState::Kill)
        .setMIFlag(MachineInstr::FrameSetup);

    if (NeedsDwarfCFI) {
      // Mark the place where EBP/RBP was saved.
      // Define the current CFA rule to use the provided offset.
      assert(StackSize);
      BuildCFI(MBB, MBBI, DL,
               MCCFIInstruction::cfiDefCfaOffset(nullptr, -2 * stackGrowth));

      // Change the rule for the FramePtr to be an "offset" rule.
      unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
      BuildCFI(MBB, MBBI, DL,
               MCCFIInstruction::createOffset(nullptr, DwarfFramePtr,
                                              2 * stackGrowth));
    }

    if (NeedsWinCFI) {
      HasWinCFI = true;
      BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
          .addImm(FramePtr)
          .setMIFlag(MachineInstr::FrameSetup);
    }

    if (!IsFunclet) {
      if (X86FI->hasSwiftAsyncContext()) {
        const auto &Attrs = MF.getFunction().getAttributes();

        // Before we update the live frame pointer we have to ensure there's a
        // valid (or null) asynchronous context in its slot just before FP in
        // the frame record, so store it now.
        if (Attrs.hasAttrSomewhere(Attribute::SwiftAsync)) {
          // We have an initial context in r14, store it just before the frame
          // pointer.
          MBB.addLiveIn(X86::R14);
          BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64r))
              .addReg(X86::R14)
              .setMIFlag(MachineInstr::FrameSetup);
        } else {
          // No initial context, store null so that there's no pointer that
          // could be misused.
          BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64i8))
              .addImm(0)
              .setMIFlag(MachineInstr::FrameSetup);
        }

        if (NeedsWinCFI) {
          HasWinCFI = true;
          BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
              .addImm(X86::R14)
              .setMIFlag(MachineInstr::FrameSetup);
        }

        BuildMI(MBB, MBBI, DL, TII.get(X86::LEA64r), FramePtr)
            .addUse(X86::RSP)
            .addImm(1)
            .addUse(X86::NoRegister)
            .addImm(8)
            .addUse(X86::NoRegister)
            .setMIFlag(MachineInstr::FrameSetup);
        BuildMI(MBB, MBBI, DL, TII.get(X86::SUB64ri8), X86::RSP)
            .addUse(X86::RSP)
            .addImm(8)
            .setMIFlag(MachineInstr::FrameSetup);
      }

      if (!IsWin64Prologue && !IsFunclet) {
        // Update EBP with the new base value.
        if (!X86FI->hasSwiftAsyncContext())
          BuildMI(MBB, MBBI, DL,
                  TII.get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr),
                  FramePtr)
              .addReg(StackPtr)
              .setMIFlag(MachineInstr::FrameSetup);

        if (NeedsDwarfCFI) {
          // Mark effective beginning of when frame pointer becomes valid.
          // Define the current CFA to use the EBP/RBP register.
          unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
          BuildCFI(
              MBB, MBBI, DL,
              MCCFIInstruction::createDefCfaRegister(nullptr, DwarfFramePtr));
        }

        if (NeedsWinFPO) {
          // .cv_fpo_setframe $FramePtr
          HasWinCFI = true;
          BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SetFrame))
              .addImm(FramePtr)
              .addImm(0)
              .setMIFlag(MachineInstr::FrameSetup);
        }
      }
    }
  } else {
    assert(!IsFunclet && "funclets without FPs not yet implemented");
    NumBytes =
        StackSize - (X86FI->getCalleeSavedFrameSize() + TailCallArgReserveSize);
  }

  // Update the offset adjustment, which is mainly used by codeview to
  // translate from ESP to VFRAME relative local variable offsets.
  if (!IsFunclet) {
    if (HasFP && TRI->hasStackRealignment(MF))
      MFI.setOffsetAdjustment(-NumBytes);
    else
      MFI.setOffsetAdjustment(-StackSize);
  }

  // For EH funclets, only allocate enough space for outgoing calls. Save the
  // NumBytes value that we would've used for the parent frame.
  unsigned ParentFrameNumBytes = NumBytes;
  if (IsFunclet)
    NumBytes = getWinEHFuncletFrameSize(MF);

  // Skip the callee-saved push instructions.
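  // While skipping them, emit the matching unwind directives: for example,
  // without a frame pointer, each "pushq %<reg>" gets a ".cfi_def_cfa_offset"
  // 8 bytes larger than the previous one (16, 24, ...).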
1604   bool PushedRegs = false;
1605   int StackOffset = 2 * stackGrowth;
1606
1607   while (MBBI != MBB.end() &&
1608          MBBI->getFlag(MachineInstr::FrameSetup) &&
1609          (MBBI->getOpcode() == X86::PUSH32r ||
1610           MBBI->getOpcode() == X86::PUSH64r)) {
1611     PushedRegs = true;
1612     Register Reg = MBBI->getOperand(0).getReg();
1613     ++MBBI;
1614
1615     if (!HasFP && NeedsDwarfCFI) {
1616       // Mark callee-saved push instruction.
1617       // Define the current CFA rule to use the provided offset.
1618       assert(StackSize);
1619       BuildCFI(MBB, MBBI, DL,
1620                MCCFIInstruction::cfiDefCfaOffset(nullptr, -StackOffset));
1621       StackOffset += stackGrowth;
1622     }
1623
1624     if (NeedsWinCFI) {
1625       HasWinCFI = true;
1626       BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
1627           .addImm(Reg)
1628           .setMIFlag(MachineInstr::FrameSetup);
1629     }
1630   }
1631
1632   // Realign stack after we pushed callee-saved registers (so that we'll be
1633   // able to calculate their offsets from the frame pointer).
1634   // Don't do this for Win64; it needs to realign the stack after the prologue.
1635   if (!IsWin64Prologue && !IsFunclet && TRI->hasStackRealignment(MF)) {
1636     assert(HasFP && "There should be a frame pointer if stack is realigned.");
1637     BuildStackAlignAND(MBB, MBBI, DL, StackPtr, MaxAlign);
1638
1639     if (NeedsWinCFI) {
1640       HasWinCFI = true;
1641       BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlign))
1642           .addImm(MaxAlign)
1643           .setMIFlag(MachineInstr::FrameSetup);
1644     }
1645   }
1646
1647   // If there is a SUB32ri of ESP immediately before this instruction, merge
1648   // the two. This can be the case when tail call elimination is enabled and
1649   // the callee has more arguments than the caller.
1650   NumBytes -= mergeSPUpdates(MBB, MBBI, true);
1651
1652   // Adjust stack pointer: ESP -= numbytes.
1653
1654   // Windows and cygwin/mingw require a prologue helper routine when allocating
1655   // more than 4K bytes on the stack. Windows uses __chkstk and cygwin/mingw
1656   // uses __alloca. __alloca and the 32-bit version of __chkstk will probe the
1657   // stack and adjust the stack pointer in one go. The 64-bit version of
1658   // __chkstk is only responsible for probing the stack. The 64-bit prologue is
1659   // responsible for adjusting the stack pointer. Touching the stack at 4K
1660   // increments is necessary to ensure that the guard pages used by the OS
1661   // virtual memory manager are allocated in correct sequence.
1662   uint64_t AlignedNumBytes = NumBytes;
1663   if (IsWin64Prologue && !IsFunclet && TRI->hasStackRealignment(MF))
1664     AlignedNumBytes = alignTo(AlignedNumBytes, MaxAlign);
1665   if (AlignedNumBytes >= StackProbeSize && EmitStackProbeCall) {
1666     assert(!X86FI->getUsesRedZone() &&
1667            "The Red Zone is not accounted for in stack probes");
1668
1669     // Check whether EAX is live-in for this block.
1670     bool isEAXAlive = isEAXLiveIn(MBB);
1671
1672     if (isEAXAlive) {
1673       if (Is64Bit) {
1674         // Save RAX
1675         BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64r))
1676             .addReg(X86::RAX, RegState::Kill)
1677             .setMIFlag(MachineInstr::FrameSetup);
1678       } else {
1679         // Save EAX
1680         BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
1681             .addReg(X86::EAX, RegState::Kill)
1682             .setMIFlag(MachineInstr::FrameSetup);
1683       }
1684     }
1685
1686     if (Is64Bit) {
1687       // Handle the 64-bit Windows ABI case where we need to call __chkstk.
1688       // Function prologue is responsible for adjusting the stack pointer.
1689       int64_t Alloc = isEAXAlive ? NumBytes - 8 : NumBytes;
1690       if (isUInt<32>(Alloc)) {
1691         BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
1692             .addImm(Alloc)
1693             .setMIFlag(MachineInstr::FrameSetup);
1694       } else if (isInt<32>(Alloc)) {
1695         BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri32), X86::RAX)
1696             .addImm(Alloc)
1697             .setMIFlag(MachineInstr::FrameSetup);
1698       } else {
1699         BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::RAX)
1700             .addImm(Alloc)
1701             .setMIFlag(MachineInstr::FrameSetup);
1702       }
1703     } else {
1704       // If EAX is live, allocate NumBytes-4 bytes on the stack; together with
1705       // the 4 bytes already pushed for EAX this covers the full frame.
1706       BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
1707           .addImm(isEAXAlive ? NumBytes - 4 : NumBytes)
1708           .setMIFlag(MachineInstr::FrameSetup);
1709     }
1710
1711     // Call __chkstk, __chkstk_ms, or __alloca.
1712     emitStackProbe(MF, MBB, MBBI, DL, true);
1713
1714     if (isEAXAlive) {
1715       // Restore RAX/EAX
1716       MachineInstr *MI;
1717       if (Is64Bit)
1718         MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV64rm), X86::RAX),
1719                           StackPtr, false, NumBytes - 8);
1720       else
1721         MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm), X86::EAX),
1722                           StackPtr, false, NumBytes - 4);
1723       MI->setFlag(MachineInstr::FrameSetup);
1724       MBB.insert(MBBI, MI);
1725     }
1726   } else if (NumBytes) {
1727     emitSPUpdate(MBB, MBBI, DL, -(int64_t)NumBytes, /*InEpilogue=*/false);
1728   }
1729
1730   if (NeedsWinCFI && NumBytes) {
1731     HasWinCFI = true;
1732     BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlloc))
1733         .addImm(NumBytes)
1734         .setMIFlag(MachineInstr::FrameSetup);
1735   }
1736
1737   int SEHFrameOffset = 0;
1738   unsigned SPOrEstablisher;
1739   if (IsFunclet) {
1740     if (IsClrFunclet) {
1741       // The establisher parameter passed to a CLR funclet is actually a pointer
1742       // to the (mostly empty) frame of its nearest enclosing funclet; we have
1743       // to find the root function establisher frame by loading the PSPSym from
1744       // the intermediate frame.
1745       unsigned PSPSlotOffset = getPSPSlotOffsetFromSP(MF);
1746       MachinePointerInfo NoInfo;
1747       MBB.addLiveIn(Establisher);
1748       addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rm), Establisher),
1749                    Establisher, false, PSPSlotOffset)
1750           .addMemOperand(MF.getMachineMemOperand(
1751               NoInfo, MachineMemOperand::MOLoad, SlotSize, Align(SlotSize)));
1752
1753       // Save the root establisher back into the current funclet's (mostly
1754       // empty) frame, in case a sub-funclet or the GC needs it.
1755       addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mr)), StackPtr,
1756                    false, PSPSlotOffset)
1757           .addReg(Establisher)
1758           .addMemOperand(MF.getMachineMemOperand(
1759               NoInfo,
1760               MachineMemOperand::MOStore | MachineMemOperand::MOVolatile,
1761               SlotSize, Align(SlotSize)));
1762     }
1763     SPOrEstablisher = Establisher;
1764   } else {
1765     SPOrEstablisher = StackPtr;
1766   }
1767
1768   if (IsWin64Prologue && HasFP) {
1769     // Set RBP to a small fixed offset from RSP. In the funclet case, we base
1770     // this calculation on the incoming establisher, which holds the value of
1771     // RSP from the parent frame at the end of the prologue.
1772     SEHFrameOffset = calculateSetFPREG(ParentFrameNumBytes);
1773     if (SEHFrameOffset)
1774       addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::LEA64r), FramePtr),
1775                    SPOrEstablisher, false, SEHFrameOffset);
1776     else
1777       BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rr), FramePtr)
1778           .addReg(SPOrEstablisher);
1779
1780     // If this is not a funclet, emit the CFI describing our frame pointer.
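    // (This is Windows unwind info rather than DWARF: the SEH_SetFrame pseudo
    // becomes a .seh_setframe directive, and for asynchronous EH personalities
    // the chosen offset is additionally recorded in WinEHFuncInfo.)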
1781 if (NeedsWinCFI && !IsFunclet) { 1782 assert(!NeedsWinFPO && "this setframe incompatible with FPO data"); 1783 HasWinCFI = true; 1784 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SetFrame)) 1785 .addImm(FramePtr) 1786 .addImm(SEHFrameOffset) 1787 .setMIFlag(MachineInstr::FrameSetup); 1788 if (isAsynchronousEHPersonality(Personality)) 1789 MF.getWinEHFuncInfo()->SEHSetFrameOffset = SEHFrameOffset; 1790 } 1791 } else if (IsFunclet && STI.is32Bit()) { 1792 // Reset EBP / ESI to something good for funclets. 1793 MBBI = restoreWin32EHStackPointers(MBB, MBBI, DL); 1794 // If we're a catch funclet, we can be returned to via catchret. Save ESP 1795 // into the registration node so that the runtime will restore it for us. 1796 if (!MBB.isCleanupFuncletEntry()) { 1797 assert(Personality == EHPersonality::MSVC_CXX); 1798 Register FrameReg; 1799 int FI = MF.getWinEHFuncInfo()->EHRegNodeFrameIndex; 1800 int64_t EHRegOffset = getFrameIndexReference(MF, FI, FrameReg).getFixed(); 1801 // ESP is the first field, so no extra displacement is needed. 1802 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32mr)), FrameReg, 1803 false, EHRegOffset) 1804 .addReg(X86::ESP); 1805 } 1806 } 1807 1808 while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup)) { 1809 const MachineInstr &FrameInstr = *MBBI; 1810 ++MBBI; 1811 1812 if (NeedsWinCFI) { 1813 int FI; 1814 if (unsigned Reg = TII.isStoreToStackSlot(FrameInstr, FI)) { 1815 if (X86::FR64RegClass.contains(Reg)) { 1816 int Offset; 1817 Register IgnoredFrameReg; 1818 if (IsWin64Prologue && IsFunclet) 1819 Offset = getWin64EHFrameIndexRef(MF, FI, IgnoredFrameReg); 1820 else 1821 Offset = 1822 getFrameIndexReference(MF, FI, IgnoredFrameReg).getFixed() + 1823 SEHFrameOffset; 1824 1825 HasWinCFI = true; 1826 assert(!NeedsWinFPO && "SEH_SaveXMM incompatible with FPO data"); 1827 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SaveXMM)) 1828 .addImm(Reg) 1829 .addImm(Offset) 1830 .setMIFlag(MachineInstr::FrameSetup); 1831 } 1832 } 1833 } 1834 } 1835 1836 if (NeedsWinCFI && HasWinCFI) 1837 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_EndPrologue)) 1838 .setMIFlag(MachineInstr::FrameSetup); 1839 1840 if (FnHasClrFunclet && !IsFunclet) { 1841 // Save the so-called Initial-SP (i.e. the value of the stack pointer 1842 // immediately after the prolog) into the PSPSlot so that funclets 1843 // and the GC can recover it. 1844 unsigned PSPSlotOffset = getPSPSlotOffsetFromSP(MF); 1845 auto PSPInfo = MachinePointerInfo::getFixedStack( 1846 MF, MF.getWinEHFuncInfo()->PSPSymFrameIdx); 1847 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mr)), StackPtr, false, 1848 PSPSlotOffset) 1849 .addReg(StackPtr) 1850 .addMemOperand(MF.getMachineMemOperand( 1851 PSPInfo, MachineMemOperand::MOStore | MachineMemOperand::MOVolatile, 1852 SlotSize, Align(SlotSize))); 1853 } 1854 1855 // Realign stack after we spilled callee-saved registers (so that we'll be 1856 // able to calculate their offsets from the frame pointer). 1857 // Win64 requires aligning the stack after the prologue. 1858 if (IsWin64Prologue && TRI->hasStackRealignment(MF)) { 1859 assert(HasFP && "There should be a frame pointer if stack is realigned."); 1860 BuildStackAlignAND(MBB, MBBI, DL, SPOrEstablisher, MaxAlign); 1861 } 1862 1863 // We already dealt with stack realignment and funclets above. 1864 if (IsFunclet && STI.is32Bit()) 1865 return; 1866 1867 // If we need a base pointer, set it up here. It's whatever the value 1868 // of the stack pointer is at this point. 
       // Any variable size objects
1869   // will be allocated after this, so we can still use the base pointer
1870   // to reference locals.
1871   if (TRI->hasBasePointer(MF)) {
1872     // Update the base pointer with the current stack pointer.
1873     unsigned Opc = Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr;
1874     BuildMI(MBB, MBBI, DL, TII.get(Opc), BasePtr)
1875         .addReg(SPOrEstablisher)
1876         .setMIFlag(MachineInstr::FrameSetup);
1877     if (X86FI->getRestoreBasePointer()) {
1878       // Stash the value of the base pointer. Saving RSP instead of EBP
1879       // shortens the dependence chain. Used by SjLj EH.
1880       unsigned Opm = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
1881       addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)),
1882                    FramePtr, true, X86FI->getRestoreBasePointerOffset())
1883           .addReg(SPOrEstablisher)
1884           .setMIFlag(MachineInstr::FrameSetup);
1885     }
1886
1887     if (X86FI->getHasSEHFramePtrSave() && !IsFunclet) {
1888       // Stash the value of the frame pointer relative to the base pointer
1889       // for Win32 EH. Win32 EH does the inverse of the above: it recovers
1890       // the frame pointer from the base pointer rather than the other way
1891       // around.
1892       unsigned Opm = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
1893       Register UsedReg;
1894       int Offset =
1895           getFrameIndexReference(MF, X86FI->getSEHFramePtrSaveIndex(), UsedReg)
1896               .getFixed();
1897       assert(UsedReg == BasePtr);
1898       addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)), UsedReg, true, Offset)
1899           .addReg(FramePtr)
1900           .setMIFlag(MachineInstr::FrameSetup);
1901     }
1902   }
1903
1904   if (((!HasFP && NumBytes) || PushedRegs) && NeedsDwarfCFI) {
1905     // Mark end of stack pointer adjustment.
1906     if (!HasFP && NumBytes) {
1907       // Define the current CFA rule to use the provided offset.
1908       assert(StackSize);
1909       BuildCFI(
1910           MBB, MBBI, DL,
1911           MCCFIInstruction::cfiDefCfaOffset(nullptr, StackSize - stackGrowth));
1912     }
1913
1914     // Emit DWARF info specifying the offsets of the callee-saved registers.
1915     emitCalleeSavedFrameMoves(MBB, MBBI, DL, true);
1916   }
1917
1918   // An x86 interrupt handling function cannot assume anything about the
1919   // direction flag (DF in the EFLAGS register), so clear it by emitting a
1920   // "cld" instruction in the prologue of every interrupt handler.
1921   //
1922   // FIXME: Create the "cld" instruction only in these cases:
1923   // 1. The interrupt handling function uses any of the "rep" instructions.
1924   // 2. Interrupt handling function calls another function.
1925   //
1926   if (Fn.getCallingConv() == CallingConv::X86_INTR)
1927     BuildMI(MBB, MBBI, DL, TII.get(X86::CLD))
1928         .setMIFlag(MachineInstr::FrameSetup);
1929
1930   // At this point we know if the function has WinCFI or not.
1931   MF.setHasWinCFI(HasWinCFI);
1932 }
1933
1934 bool X86FrameLowering::canUseLEAForSPInEpilogue(
1935     const MachineFunction &MF) const {
1936   // We can't use LEA instructions for adjusting the stack pointer if we don't
1937   // have a frame pointer in the Win64 ABI. Only ADD instructions may be used
1938   // to deallocate the stack.
1939   // This means that we can use LEA for SP in two situations:
1940   // 1. We *aren't* using the Win64 ABI, which means we are free to use LEA.
1941   // 2. We *have* a frame pointer, which means we are permitted to use LEA.
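  // (emitEpilogue below spells out the two SEH-legal epilogue forms,
  //    add $SEHAllocationSize, %rsp
  //    lea SEHAllocationSize(%FramePtr), %rsp
  // and the LEA form is only usable when a frame pointer exists.)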
1942 return !MF.getTarget().getMCAsmInfo()->usesWindowsCFI() || hasFP(MF); 1943 } 1944 1945 static bool isFuncletReturnInstr(MachineInstr &MI) { 1946 switch (MI.getOpcode()) { 1947 case X86::CATCHRET: 1948 case X86::CLEANUPRET: 1949 return true; 1950 default: 1951 return false; 1952 } 1953 llvm_unreachable("impossible"); 1954 } 1955 1956 // CLR funclets use a special "Previous Stack Pointer Symbol" slot on the 1957 // stack. It holds a pointer to the bottom of the root function frame. The 1958 // establisher frame pointer passed to a nested funclet may point to the 1959 // (mostly empty) frame of its parent funclet, but it will need to find 1960 // the frame of the root function to access locals. To facilitate this, 1961 // every funclet copies the pointer to the bottom of the root function 1962 // frame into a PSPSym slot in its own (mostly empty) stack frame. Using the 1963 // same offset for the PSPSym in the root function frame that's used in the 1964 // funclets' frames allows each funclet to dynamically accept any ancestor 1965 // frame as its establisher argument (the runtime doesn't guarantee the 1966 // immediate parent for some reason lost to history), and also allows the GC, 1967 // which uses the PSPSym for some bookkeeping, to find it in any funclet's 1968 // frame with only a single offset reported for the entire method. 1969 unsigned 1970 X86FrameLowering::getPSPSlotOffsetFromSP(const MachineFunction &MF) const { 1971 const WinEHFuncInfo &Info = *MF.getWinEHFuncInfo(); 1972 Register SPReg; 1973 int Offset = getFrameIndexReferencePreferSP(MF, Info.PSPSymFrameIdx, SPReg, 1974 /*IgnoreSPUpdates*/ true) 1975 .getFixed(); 1976 assert(Offset >= 0 && SPReg == TRI->getStackRegister()); 1977 return static_cast<unsigned>(Offset); 1978 } 1979 1980 unsigned 1981 X86FrameLowering::getWinEHFuncletFrameSize(const MachineFunction &MF) const { 1982 const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>(); 1983 // This is the size of the pushed CSRs. 1984 unsigned CSSize = X86FI->getCalleeSavedFrameSize(); 1985 // This is the size of callee saved XMMs. 1986 const auto& WinEHXMMSlotInfo = X86FI->getWinEHXMMSlotInfo(); 1987 unsigned XMMSize = WinEHXMMSlotInfo.size() * 1988 TRI->getSpillSize(X86::VR128RegClass); 1989 // This is the amount of stack a funclet needs to allocate. 1990 unsigned UsedSize; 1991 EHPersonality Personality = 1992 classifyEHPersonality(MF.getFunction().getPersonalityFn()); 1993 if (Personality == EHPersonality::CoreCLR) { 1994 // CLR funclets need to hold enough space to include the PSPSym, at the 1995 // same offset from the stack pointer (immediately after the prolog) as it 1996 // resides at in the main function. 1997 UsedSize = getPSPSlotOffsetFromSP(MF) + SlotSize; 1998 } else { 1999 // Other funclets just need enough stack for outgoing call arguments. 2000 UsedSize = MF.getFrameInfo().getMaxCallFrameSize(); 2001 } 2002 // RBP is not included in the callee saved register block. After pushing RBP, 2003 // everything is 16 byte aligned. Everything we allocate before an outgoing 2004 // call must also be 16 byte aligned. 2005 unsigned FrameSizeMinusRBP = alignTo(CSSize + UsedSize, getStackAlign()); 2006 // Subtract out the size of the callee saved registers. This is how much stack 2007 // each funclet will allocate. 
2008 return FrameSizeMinusRBP + XMMSize - CSSize; 2009 } 2010 2011 static bool isTailCallOpcode(unsigned Opc) { 2012 return Opc == X86::TCRETURNri || Opc == X86::TCRETURNdi || 2013 Opc == X86::TCRETURNmi || 2014 Opc == X86::TCRETURNri64 || Opc == X86::TCRETURNdi64 || 2015 Opc == X86::TCRETURNmi64; 2016 } 2017 2018 void X86FrameLowering::emitEpilogue(MachineFunction &MF, 2019 MachineBasicBlock &MBB) const { 2020 const MachineFrameInfo &MFI = MF.getFrameInfo(); 2021 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>(); 2022 MachineBasicBlock::iterator Terminator = MBB.getFirstTerminator(); 2023 MachineBasicBlock::iterator MBBI = Terminator; 2024 DebugLoc DL; 2025 if (MBBI != MBB.end()) 2026 DL = MBBI->getDebugLoc(); 2027 // standard x86_64 and NaCl use 64-bit frame/stack pointers, x32 - 32-bit. 2028 const bool Is64BitILP32 = STI.isTarget64BitILP32(); 2029 Register FramePtr = TRI->getFrameRegister(MF); 2030 Register MachineFramePtr = 2031 Is64BitILP32 ? Register(getX86SubSuperRegister(FramePtr, 64)) : FramePtr; 2032 2033 bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI(); 2034 bool NeedsWin64CFI = 2035 IsWin64Prologue && MF.getFunction().needsUnwindTableEntry(); 2036 bool IsFunclet = MBBI == MBB.end() ? false : isFuncletReturnInstr(*MBBI); 2037 2038 // Get the number of bytes to allocate from the FrameInfo. 2039 uint64_t StackSize = MFI.getStackSize(); 2040 uint64_t MaxAlign = calculateMaxStackAlign(MF); 2041 unsigned CSSize = X86FI->getCalleeSavedFrameSize(); 2042 unsigned TailCallArgReserveSize = -X86FI->getTCReturnAddrDelta(); 2043 bool HasFP = hasFP(MF); 2044 uint64_t NumBytes = 0; 2045 2046 bool NeedsDwarfCFI = (!MF.getTarget().getTargetTriple().isOSDarwin() && 2047 !MF.getTarget().getTargetTriple().isOSWindows()) && 2048 MF.needsFrameMoves(); 2049 2050 if (IsFunclet) { 2051 assert(HasFP && "EH funclets without FP not yet implemented"); 2052 NumBytes = getWinEHFuncletFrameSize(MF); 2053 } else if (HasFP) { 2054 // Calculate required stack adjustment. 2055 uint64_t FrameSize = StackSize - SlotSize; 2056 NumBytes = FrameSize - CSSize - TailCallArgReserveSize; 2057 2058 // Callee-saved registers were pushed on stack before the stack was 2059 // realigned. 2060 if (TRI->hasStackRealignment(MF) && !IsWin64Prologue) 2061 NumBytes = alignTo(FrameSize, MaxAlign); 2062 } else { 2063 NumBytes = StackSize - CSSize - TailCallArgReserveSize; 2064 } 2065 uint64_t SEHStackAllocAmt = NumBytes; 2066 2067 // AfterPop is the position to insert .cfi_restore. 2068 MachineBasicBlock::iterator AfterPop = MBBI; 2069 if (HasFP) { 2070 if (X86FI->hasSwiftAsyncContext()) { 2071 // Discard the context. 2072 int Offset = 16 + mergeSPUpdates(MBB, MBBI, true); 2073 emitSPUpdate(MBB, MBBI, DL, Offset, /*InEpilogue*/true); 2074 } 2075 // Pop EBP. 2076 BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::POP64r : X86::POP32r), 2077 MachineFramePtr) 2078 .setMIFlag(MachineInstr::FrameDestroy); 2079 2080 // We need to reset FP to its untagged state on return. Bit 60 is currently 2081 // used to show the presence of an extended frame. 2082 if (X86FI->hasSwiftAsyncContext()) { 2083 BuildMI(MBB, MBBI, DL, TII.get(X86::BTR64ri8), 2084 MachineFramePtr) 2085 .addUse(MachineFramePtr) 2086 .addImm(60) 2087 .setMIFlag(MachineInstr::FrameDestroy); 2088 } 2089 2090 if (NeedsDwarfCFI) { 2091 unsigned DwarfStackPtr = 2092 TRI->getDwarfRegNum(Is64Bit ? 
                               X86::RSP : X86::ESP, true);
2093     BuildCFI(MBB, MBBI, DL,
2094              MCCFIInstruction::cfiDefCfa(nullptr, DwarfStackPtr, SlotSize));
2095     if (!MBB.succ_empty() && !MBB.isReturnBlock()) {
2096       unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
2097       BuildCFI(MBB, AfterPop, DL,
2098                MCCFIInstruction::createRestore(nullptr, DwarfFramePtr));
2099       --MBBI;
2100       --AfterPop;
2101     }
2102     --MBBI;
2103   }
2104 }
2105
2106   MachineBasicBlock::iterator FirstCSPop = MBBI;
2107   // Skip the callee-saved pop instructions.
2108   while (MBBI != MBB.begin()) {
2109     MachineBasicBlock::iterator PI = std::prev(MBBI);
2110     unsigned Opc = PI->getOpcode();
2111
2112     if (Opc != X86::DBG_VALUE && !PI->isTerminator()) {
2113       if ((Opc != X86::POP32r || !PI->getFlag(MachineInstr::FrameDestroy)) &&
2114           (Opc != X86::POP64r || !PI->getFlag(MachineInstr::FrameDestroy)) &&
2115           (Opc != X86::BTR64ri8 || !PI->getFlag(MachineInstr::FrameDestroy)) &&
2116           (Opc != X86::ADD64ri8 || !PI->getFlag(MachineInstr::FrameDestroy)))
2117         break;
2118       FirstCSPop = PI;
2119     }
2120
2121     --MBBI;
2122   }
2123   MBBI = FirstCSPop;
2124
2125   if (IsFunclet && Terminator->getOpcode() == X86::CATCHRET)
2126     emitCatchRetReturnValue(MBB, FirstCSPop, &*Terminator);
2127
2128   if (MBBI != MBB.end())
2129     DL = MBBI->getDebugLoc();
2130   // If there is an ADD32ri or SUB32ri of ESP immediately before this
2131   // instruction, merge the two instructions.
2132   if (NumBytes || MFI.hasVarSizedObjects())
2133     NumBytes += mergeSPUpdates(MBB, MBBI, true);
2134
2135   // If dynamic alloca is used, then reset ESP to point to the last callee-saved
2136   // slot before popping them off. The same applies when the stack was
2137   // realigned. Don't do this if this was a funclet epilogue, since the funclets
2138   // will not do realignment or dynamic stack allocation.
2139   if (((TRI->hasStackRealignment(MF)) || MFI.hasVarSizedObjects()) &&
2140       !IsFunclet) {
2141     if (TRI->hasStackRealignment(MF))
2142       MBBI = FirstCSPop;
2143     unsigned SEHFrameOffset = calculateSetFPREG(SEHStackAllocAmt);
2144     uint64_t LEAAmount =
2145         IsWin64Prologue ? SEHStackAllocAmt - SEHFrameOffset : -CSSize;
2146
2147     if (X86FI->hasSwiftAsyncContext())
2148       LEAAmount -= 16;
2149
2150     // There are only two legal forms of epilogue:
2151     // - add SEHAllocationSize, %rsp
2152     // - lea SEHAllocationSize(%FramePtr), %rsp
2153     //
2154     // 'mov %FramePtr, %rsp' will not be recognized as an epilogue sequence.
2155     // However, we may use this sequence if we have a frame pointer because the
2156     // effects of the prologue can safely be undone.
2157     if (LEAAmount != 0) {
2158       unsigned Opc = getLEArOpcode(Uses64BitFramePtr);
2159       addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),
2160                    FramePtr, false, LEAAmount);
2161       --MBBI;
2162     } else {
2163       unsigned Opc = (Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr);
2164       BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
2165           .addReg(FramePtr);
2166       --MBBI;
2167     }
2168   } else if (NumBytes) {
2169     // Adjust stack pointer back: ESP += numbytes.
2170     emitSPUpdate(MBB, MBBI, DL, NumBytes, /*InEpilogue=*/true);
2171     if (!HasFP && NeedsDwarfCFI) {
2172       // Define the current CFA rule to use the provided offset.
2173       BuildCFI(MBB, MBBI, DL,
2174                MCCFIInstruction::cfiDefCfaOffset(
2175                    nullptr, CSSize + TailCallArgReserveSize + SlotSize));
2176     }
2177     --MBBI;
2178   }
2179
2180   // The Windows unwinder will not invoke a function's exception handler if
2181   // the IP is in the prologue or the epilogue. This causes a problem when a
2182   // call immediately precedes an epilogue, because the return address points
2183   // into the epilogue. To cope with that, we insert an epilogue marker here,
2184   // then replace it with a 'nop' if it ends up immediately after a CALL in the
2185   // final emitted code.
2186   if (NeedsWin64CFI && MF.hasWinCFI())
2187     BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_Epilogue));
2188
2189   if (!HasFP && NeedsDwarfCFI) {
2190     MBBI = FirstCSPop;
2191     int64_t Offset = -CSSize - SlotSize;
2192     // Mark callee-saved pop instruction.
2193     // Define the current CFA rule to use the provided offset.
2194     while (MBBI != MBB.end()) {
2195       MachineBasicBlock::iterator PI = MBBI;
2196       unsigned Opc = PI->getOpcode();
2197       ++MBBI;
2198       if (Opc == X86::POP32r || Opc == X86::POP64r) {
2199         Offset += SlotSize;
2200         BuildCFI(MBB, MBBI, DL,
2201                  MCCFIInstruction::cfiDefCfaOffset(nullptr, -Offset));
2202       }
2203     }
2204   }
2205
2206   // Emit DWARF info specifying the restores of the callee-saved registers.
2207   // For an epilogue containing a return, or another block without successors,
2208   // there is no need to generate .cfi_restore for callee-saved registers.
2209   if (NeedsDwarfCFI && !MBB.succ_empty())
2210     emitCalleeSavedFrameMoves(MBB, AfterPop, DL, false);
2211
2212   if (Terminator == MBB.end() || !isTailCallOpcode(Terminator->getOpcode())) {
2213     // Add the return addr area delta back since we are not tail calling.
2214     int Offset = -1 * X86FI->getTCReturnAddrDelta();
2215     assert(Offset >= 0 && "TCDelta should never be positive");
2216     if (Offset) {
2217       // Check for possible merge with preceding ADD instruction.
2218       Offset += mergeSPUpdates(MBB, Terminator, true);
2219       emitSPUpdate(MBB, Terminator, DL, Offset, /*InEpilogue=*/true);
2220     }
2221   }
2222
2223   // Emit tilerelease for AMX kernel.
2224   if (X86FI->hasVirtualTileReg())
2225     BuildMI(MBB, Terminator, DL, TII.get(X86::TILERELEASE));
2226 }
2227
2228 StackOffset X86FrameLowering::getFrameIndexReference(const MachineFunction &MF,
2229                                                      int FI,
2230                                                      Register &FrameReg) const {
2231   const MachineFrameInfo &MFI = MF.getFrameInfo();
2232
2233   bool IsFixed = MFI.isFixedObjectIndex(FI);
2234   // We can't calculate the offset from the frame pointer if the stack is
2235   // realigned, so enforce usage of the stack/base pointer. The base pointer
2236   // is used when we have dynamic allocas in addition to dynamic realignment.
2237   if (TRI->hasBasePointer(MF))
2238     FrameReg = IsFixed ? TRI->getFramePtr() : TRI->getBaseRegister();
2239   else if (TRI->hasStackRealignment(MF))
2240     FrameReg = IsFixed ? TRI->getFramePtr() : TRI->getStackRegister();
2241   else
2242     FrameReg = TRI->getFrameRegister(MF);
2243
2244   // Offset will hold the offset from the stack pointer at function entry to the
2245   // object.
2246   // We need to factor in additional offsets applied during the prologue to the
2247   // frame, base, and stack pointer depending on which is used.
2248   int Offset = MFI.getObjectOffset(FI) - getOffsetOfLocalArea();
2249   const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
2250   unsigned CSSize = X86FI->getCalleeSavedFrameSize();
2251   uint64_t StackSize = MFI.getStackSize();
2252   bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
2253   int64_t FPDelta = 0;
2254
2255   // In an x86 interrupt, remove the offset we added to account for the return
2256   // address from any stack object allocated in the caller's frame. Interrupts
2257   // do not have a standard return address.
Fixed objects in the current frame, 2258 // such as SSE register spills, should not get this treatment. 2259 if (MF.getFunction().getCallingConv() == CallingConv::X86_INTR && 2260 Offset >= 0) { 2261 Offset += getOffsetOfLocalArea(); 2262 } 2263 2264 if (IsWin64Prologue) { 2265 assert(!MFI.hasCalls() || (StackSize % 16) == 8); 2266 2267 // Calculate required stack adjustment. 2268 uint64_t FrameSize = StackSize - SlotSize; 2269 // If required, include space for extra hidden slot for stashing base pointer. 2270 if (X86FI->getRestoreBasePointer()) 2271 FrameSize += SlotSize; 2272 uint64_t NumBytes = FrameSize - CSSize; 2273 2274 uint64_t SEHFrameOffset = calculateSetFPREG(NumBytes); 2275 if (FI && FI == X86FI->getFAIndex()) 2276 return StackOffset::getFixed(-SEHFrameOffset); 2277 2278 // FPDelta is the offset from the "traditional" FP location of the old base 2279 // pointer followed by return address and the location required by the 2280 // restricted Win64 prologue. 2281 // Add FPDelta to all offsets below that go through the frame pointer. 2282 FPDelta = FrameSize - SEHFrameOffset; 2283 assert((!MFI.hasCalls() || (FPDelta % 16) == 0) && 2284 "FPDelta isn't aligned per the Win64 ABI!"); 2285 } 2286 2287 if (FrameReg == TRI->getFramePtr()) { 2288 // Skip saved EBP/RBP 2289 Offset += SlotSize; 2290 2291 // Account for restricted Windows prologue. 2292 Offset += FPDelta; 2293 2294 // Skip the RETADDR move area 2295 int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta(); 2296 if (TailCallReturnAddrDelta < 0) 2297 Offset -= TailCallReturnAddrDelta; 2298 2299 return StackOffset::getFixed(Offset); 2300 } 2301 2302 // FrameReg is either the stack pointer or a base pointer. But the base is 2303 // located at the end of the statically known StackSize so the distinction 2304 // doesn't really matter. 2305 if (TRI->hasStackRealignment(MF) || TRI->hasBasePointer(MF)) 2306 assert(isAligned(MFI.getObjectAlign(FI), -(Offset + StackSize))); 2307 return StackOffset::getFixed(Offset + StackSize); 2308 } 2309 2310 int X86FrameLowering::getWin64EHFrameIndexRef(const MachineFunction &MF, int FI, 2311 Register &FrameReg) const { 2312 const MachineFrameInfo &MFI = MF.getFrameInfo(); 2313 const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>(); 2314 const auto& WinEHXMMSlotInfo = X86FI->getWinEHXMMSlotInfo(); 2315 const auto it = WinEHXMMSlotInfo.find(FI); 2316 2317 if (it == WinEHXMMSlotInfo.end()) 2318 return getFrameIndexReference(MF, FI, FrameReg).getFixed(); 2319 2320 FrameReg = TRI->getStackRegister(); 2321 return alignDown(MFI.getMaxCallFrameSize(), getStackAlign().value()) + 2322 it->second; 2323 } 2324 2325 StackOffset 2326 X86FrameLowering::getFrameIndexReferenceSP(const MachineFunction &MF, int FI, 2327 Register &FrameReg, 2328 int Adjustment) const { 2329 const MachineFrameInfo &MFI = MF.getFrameInfo(); 2330 FrameReg = TRI->getStackRegister(); 2331 return StackOffset::getFixed(MFI.getObjectOffset(FI) - 2332 getOffsetOfLocalArea() + Adjustment); 2333 } 2334 2335 StackOffset 2336 X86FrameLowering::getFrameIndexReferencePreferSP(const MachineFunction &MF, 2337 int FI, Register &FrameReg, 2338 bool IgnoreSPUpdates) const { 2339 2340 const MachineFrameInfo &MFI = MF.getFrameInfo(); 2341 // Does not include any dynamic realign. 2342 const uint64_t StackSize = MFI.getStackSize(); 2343 // LLVM arranges the stack as follows: 2344 // ... 
2345   //   ARG2
2346   //   ARG1
2347   //   RETADDR
2348   //   PUSH RBP   <-- RBP points here
2349   //   PUSH CSRs
2350   //   ~~~~~~~    <-- possible stack realignment (non-win64)
2351   //   ...
2352   //   STACK OBJECTS
2353   //   ...        <-- RSP after prologue points here
2354   //   ~~~~~~~    <-- possible stack realignment (win64)
2355   //
2356   // if (hasVarSizedObjects()):
2357   //   ...        <-- "base pointer" (ESI/RBX) points here
2358   //   DYNAMIC ALLOCAS
2359   //   ...        <-- RSP points here
2360   //
2361   // Case 1: In the simple case of no stack realignment and no dynamic
2362   // allocas, both "fixed" stack objects (arguments and CSRs) are addressable
2363   // with fixed offsets from RSP.
2364   //
2365   // Case 2: In the case of stack realignment with no dynamic allocas, fixed
2366   // stack objects are addressed with RBP and regular stack objects with RSP.
2367   //
2368   // Case 3: In the case of dynamic allocas and stack realignment, RSP is used
2369   // to address stack arguments for outgoing calls and nothing else. The "base
2370   // pointer" points to local variables, and RBP points to fixed objects.
2371   //
2372   // In cases 2 and 3, we can only answer for non-fixed stack objects, and the
2373   // answer we give is relative to the SP after the prologue, and not the
2374   // SP in the middle of the function.
2375
2376   if (MFI.isFixedObjectIndex(FI) && TRI->hasStackRealignment(MF) &&
2377       !STI.isTargetWin64())
2378     return getFrameIndexReference(MF, FI, FrameReg);
2379
2380   // If !hasReservedCallFrame the function might have an SP adjustment in the
2381   // body. So, even though the offset is statically known, it depends on where
2382   // we are in the function.
2383   if (!IgnoreSPUpdates && !hasReservedCallFrame(MF))
2384     return getFrameIndexReference(MF, FI, FrameReg);
2385
2386   // We don't handle tail calls, and shouldn't be seeing them either.
2387   assert(MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta() >= 0 &&
2388          "we don't handle this case!");
2389
2390   // This is how the math works out:
2391   //
2392   //  %rsp grows (i.e. gets lower) left to right. Each box below is
2393   //  one word (eight bytes). Obj0 is the stack slot we're trying to
2394   //  get to.
2395   //
2396   //    ----------------------------------
2397   //    | BP | Obj0 | Obj1 | ... | ObjN |
2398   //    ----------------------------------
2399   //    ^    ^      ^                   ^
2400   //    A    B      C                   E
2401   //
2402   // A is the incoming stack pointer.
2403   // (B - A) is the local area offset (-8 for x86-64) [1]
2404   // (C - A) is the Offset returned by MFI.getObjectOffset for Obj0 [2]
2405   //
2406   // |(E - B)| is the StackSize (absolute value, positive). For a
2407   // stack that grows down, this works out to be (B - E). [3]
2408   //
2409   // E is also the value of %rsp after stack has been set up, and we
2410   // want (C - E) -- the value we can add to %rsp to get to Obj0. Now
2411   //    (C - E) == (C - A) - (B - A) + (B - E)
2412   //          { Using [1], [2] and [3] above }
2413   //          == getObjectOffset - LocalAreaOffset + StackSize
2414
2415   return getFrameIndexReferenceSP(MF, FI, FrameReg, StackSize);
2416 }
2417
2418 bool X86FrameLowering::assignCalleeSavedSpillSlots(
2419     MachineFunction &MF, const TargetRegisterInfo *TRI,
2420     std::vector<CalleeSavedInfo> &CSI) const {
2421   MachineFrameInfo &MFI = MF.getFrameInfo();
2422   X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
2423
2424   unsigned CalleeSavedFrameSize = 0;
2425   unsigned XMMCalleeSavedFrameSize = 0;
2426   auto &WinEHXMMSlotInfo = X86FI->getWinEHXMMSlotInfo();
2427   int SpillSlotOffset = getOffsetOfLocalArea() + X86FI->getTCReturnAddrDelta();
2428
2429   int64_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
2430
2431   if (TailCallReturnAddrDelta < 0) {
2432     // create RETURNADDR area
2433     //   arg
2434     //   arg
2435     //   RETADDR
2436     //   { ...
2437     //     RETADDR area
2438     //     ...
2439     //   }
2440     //   [EBP]
2441     MFI.CreateFixedObject(-TailCallReturnAddrDelta,
2442                           TailCallReturnAddrDelta - SlotSize, true);
2443   }
2444
2445   // Spill the BasePtr if it's used.
2446   if (this->TRI->hasBasePointer(MF)) {
2447     // Allocate a spill slot for EBP if we have a base pointer and EH funclets.
2448     if (MF.hasEHFunclets()) {
2449       int FI = MFI.CreateSpillStackObject(SlotSize, Align(SlotSize));
2450       X86FI->setHasSEHFramePtrSave(true);
2451       X86FI->setSEHFramePtrSaveIndex(FI);
2452     }
2453   }
2454
2455   if (hasFP(MF)) {
2456     // emitPrologue always spills the frame register first.
2457     SpillSlotOffset -= SlotSize;
2458     MFI.CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
2459
2460     // The async context lives directly before the frame pointer, and we
2461     // allocate a second slot to preserve stack alignment.
2462     if (X86FI->hasSwiftAsyncContext()) {
2463       SpillSlotOffset -= SlotSize;
2464       MFI.CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
2465       SpillSlotOffset -= SlotSize;
2466     }
2467
2468     // Since emitPrologue and emitEpilogue will handle spilling and restoring of
2469     // the frame register, we can delete it from the CSI list and not have to
2470     // worry about avoiding it later.
2471     Register FPReg = TRI->getFrameRegister(MF);
2472     for (unsigned i = 0; i < CSI.size(); ++i) {
2473       if (TRI->regsOverlap(CSI[i].getReg(), FPReg)) {
2474         CSI.erase(CSI.begin() + i);
2475         break;
2476       }
2477     }
2478   }
2479
2480   // Assign slots for GPRs. This increases the frame size.
2481   for (unsigned i = CSI.size(); i != 0; --i) {
2482     unsigned Reg = CSI[i - 1].getReg();
2483
2484     if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
2485       continue;
2486
2487     SpillSlotOffset -= SlotSize;
2488     CalleeSavedFrameSize += SlotSize;
2489
2490     int SlotIndex = MFI.CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
2491     CSI[i - 1].setFrameIdx(SlotIndex);
2492   }
2493
2494   X86FI->setCalleeSavedFrameSize(CalleeSavedFrameSize);
2495   MFI.setCVBytesOfCalleeSavedRegisters(CalleeSavedFrameSize);
2496
2497   // Assign slots for XMMs.
2498   for (unsigned i = CSI.size(); i != 0; --i) {
2499     unsigned Reg = CSI[i - 1].getReg();
2500     if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
2501       continue;
2502
2503     // If this is a k-register, make sure we look it up via the largest legal type.
2504     MVT VT = MVT::Other;
2505     if (X86::VK16RegClass.contains(Reg))
2506       VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;
2507
2508     const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
2509     unsigned Size = TRI->getSpillSize(*RC);
2510     Align Alignment = TRI->getSpillAlign(*RC);
2511     // Ensure alignment.
2512     assert(SpillSlotOffset < 0 && "SpillSlotOffset should always be < 0 on X86");
2513     SpillSlotOffset = -alignTo(-SpillSlotOffset, Alignment);
2514
2515     // Spill into slot.
2516     SpillSlotOffset -= Size;
2517     int SlotIndex = MFI.CreateFixedSpillStackObject(Size, SpillSlotOffset);
2518     CSI[i - 1].setFrameIdx(SlotIndex);
2519     MFI.ensureMaxAlignment(Alignment);
2520
2521     // Save the start offset and size of XMM in stack frame for funclets.
2522     if (X86::VR128RegClass.contains(Reg)) {
2523       WinEHXMMSlotInfo[SlotIndex] = XMMCalleeSavedFrameSize;
2524       XMMCalleeSavedFrameSize += Size;
2525     }
2526   }
2527
2528   return true;
2529 }
2530
2531 bool X86FrameLowering::spillCalleeSavedRegisters(
2532     MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
2533     ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
2534   DebugLoc DL = MBB.findDebugLoc(MI);
2535
2536   // Don't save CSRs in 32-bit EH funclets. The caller saves EBX, EBP, ESI, EDI
2537   // for us, and there are no XMM CSRs on Win32.
2538   if (MBB.isEHFuncletEntry() && STI.is32Bit() && STI.isOSWindows())
2539     return true;
2540
2541   // Push GPRs. This increases the frame size.
2542   const MachineFunction &MF = *MBB.getParent();
2543   unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
2544   for (unsigned i = CSI.size(); i != 0; --i) {
2545     unsigned Reg = CSI[i - 1].getReg();
2546
2547     if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
2548       continue;
2549
2550     const MachineRegisterInfo &MRI = MF.getRegInfo();
2551     bool isLiveIn = MRI.isLiveIn(Reg);
2552     if (!isLiveIn)
2553       MBB.addLiveIn(Reg);
2554
2555     // Decide whether we can add a kill flag to the use.
2556     bool CanKill = !isLiveIn;
2557     // Check if any subregister is live-in.
2558     if (CanKill) {
2559       for (MCRegAliasIterator AReg(Reg, TRI, false); AReg.isValid(); ++AReg) {
2560         if (MRI.isLiveIn(*AReg)) {
2561           CanKill = false;
2562           break;
2563         }
2564       }
2565     }
2566
2567     // Do not set a kill flag on values that are also marked as live-in. This
2568     // happens with the @llvm.returnaddress intrinsic and with arguments
2569     // passed in callee-saved registers.
2570     // Omitting the kill flags is conservatively correct even if the live-in
2571     // is not used after all.
2572     BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, getKillRegState(CanKill))
2573         .setMIFlag(MachineInstr::FrameSetup);
2574   }
2575
2576   // Spill the XMM regs: X86 has no push/pop instructions for XMM registers,
2577   // so store them to their assigned stack-frame slots instead.
2578   for (unsigned i = CSI.size(); i != 0; --i) {
2579     unsigned Reg = CSI[i - 1].getReg();
2580     if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
2581       continue;
2582
2583     // If this is a k-register, make sure we look it up via the largest legal type.
2584     MVT VT = MVT::Other;
2585     if (X86::VK16RegClass.contains(Reg))
2586       VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;
2587
2588     // Add the callee-saved register as live-in. It's killed at the spill.
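    // (storeRegToStackSlot inserts the store before MI, so below we step back
    // one instruction to tag the new store with the FrameSetup flag.)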
2589     MBB.addLiveIn(Reg);
2590     const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
2591
2592     TII.storeRegToStackSlot(MBB, MI, Reg, true, CSI[i - 1].getFrameIdx(), RC,
2593                             TRI);
2594     --MI;
2595     MI->setFlag(MachineInstr::FrameSetup);
2596     ++MI;
2597   }
2598
2599   return true;
2600 }
2601
2602 void X86FrameLowering::emitCatchRetReturnValue(MachineBasicBlock &MBB,
2603                                                MachineBasicBlock::iterator MBBI,
2604                                                MachineInstr *CatchRet) const {
2605   // SEH shouldn't use catchret.
2606   assert(!isAsynchronousEHPersonality(classifyEHPersonality(
2607              MBB.getParent()->getFunction().getPersonalityFn())) &&
2608          "SEH should not use CATCHRET");
2609   const DebugLoc &DL = CatchRet->getDebugLoc();
2610   MachineBasicBlock *CatchRetTarget = CatchRet->getOperand(0).getMBB();
2611
2612   // Fill EAX/RAX with the address of the target block.
2613   if (STI.is64Bit()) {
2614     // LEA64r CatchRetTarget(%rip), %rax
2615     BuildMI(MBB, MBBI, DL, TII.get(X86::LEA64r), X86::RAX)
2616         .addReg(X86::RIP)
2617         .addImm(0)
2618         .addReg(0)
2619         .addMBB(CatchRetTarget)
2620         .addReg(0);
2621   } else {
2622     // MOV32ri $CatchRetTarget, %eax
2623     BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
2624         .addMBB(CatchRetTarget);
2625   }
2626
2627   // Record that we've taken the address of CatchRetTarget and no longer just
2628   // reference it in a terminator.
2629   CatchRetTarget->setHasAddressTaken();
2630 }
2631
2632 bool X86FrameLowering::restoreCalleeSavedRegisters(
2633     MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
2634     MutableArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
2635   if (CSI.empty())
2636     return false;
2637
2638   if (MI != MBB.end() && isFuncletReturnInstr(*MI) && STI.isOSWindows()) {
2639     // Don't restore CSRs in 32-bit EH funclets. Matches
2640     // spillCalleeSavedRegisters.
2641     if (STI.is32Bit())
2642       return true;
2643     // Don't restore CSRs before an SEH catchret. SEH except blocks do not form
2644     // funclets. emitEpilogue transforms these to normal jumps.
2645     if (MI->getOpcode() == X86::CATCHRET) {
2646       const Function &F = MBB.getParent()->getFunction();
2647       bool IsSEH = isAsynchronousEHPersonality(
2648           classifyEHPersonality(F.getPersonalityFn()));
2649       if (IsSEH)
2650         return true;
2651     }
2652   }
2653
2654   DebugLoc DL = MBB.findDebugLoc(MI);
2655
2656   // Reload XMMs from stack frame.
2657   for (const CalleeSavedInfo &I : CSI) {
2658     unsigned Reg = I.getReg();
2659     if (X86::GR64RegClass.contains(Reg) ||
2660         X86::GR32RegClass.contains(Reg))
2661       continue;
2662
2663     // If this is a k-register, make sure we look it up via the largest legal type.
2664     MVT VT = MVT::Other;
2665     if (X86::VK16RegClass.contains(Reg))
2666       VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;
2667
2668     const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
2669     TII.loadRegFromStackSlot(MBB, MI, Reg, I.getFrameIdx(), RC, TRI);
2670   }
2671
2672   // POP GPRs.
2673   unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r;
2674   for (const CalleeSavedInfo &I : CSI) {
2675     unsigned Reg = I.getReg();
2676     if (!X86::GR64RegClass.contains(Reg) &&
2677         !X86::GR32RegClass.contains(Reg))
2678       continue;
2679
2680     BuildMI(MBB, MI, DL, TII.get(Opc), Reg)
2681         .setMIFlag(MachineInstr::FrameDestroy);
2682   }
2683   return true;
2684 }
2685
2686 void X86FrameLowering::determineCalleeSaves(MachineFunction &MF,
2687                                             BitVector &SavedRegs,
2688                                             RegScavenger *RS) const {
2689   TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
2690
2691   // Spill the BasePtr if it's used.
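  // (Setting the bit in SavedRegs is all that is needed; the generic CSR
  // spilling machinery then treats the base pointer like any other
  // callee-saved register. On x32 we mark the 64-bit super-register, since the
  // prologue pushes the full 64-bit register.)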
2692   if (TRI->hasBasePointer(MF)) {
2693     Register BasePtr = TRI->getBaseRegister();
2694     if (STI.isTarget64BitILP32())
2695       BasePtr = getX86SubSuperRegister(BasePtr, 64);
2696     SavedRegs.set(BasePtr);
2697   }
2698 }
2699
2700 static bool
2701 HasNestArgument(const MachineFunction *MF) {
2702   const Function &F = MF->getFunction();
2703   for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end();
2704        I != E; ++I) {
2705     if (I->hasNestAttr() && !I->use_empty())
2706       return true;
2707   }
2708   return false;
2709 }
2710
2711 /// GetScratchRegister - Get a temp register for performing work in the
2712 /// segmented stack and the Erlang/HiPE stack prologue. Depending on platform
2713 /// and the properties of the function either one or two registers will be
2714 /// needed. Set primary to true for the first register, false for the second.
2715 static unsigned
2716 GetScratchRegister(bool Is64Bit, bool IsLP64, const MachineFunction &MF, bool Primary) {
2717   CallingConv::ID CallingConvention = MF.getFunction().getCallingConv();
2718
2719   // Erlang stuff.
2720   if (CallingConvention == CallingConv::HiPE) {
2721     if (Is64Bit)
2722       return Primary ? X86::R14 : X86::R13;
2723     else
2724       return Primary ? X86::EBX : X86::EDI;
2725   }
2726
2727   if (Is64Bit) {
2728     if (IsLP64)
2729       return Primary ? X86::R11 : X86::R12;
2730     else
2731       return Primary ? X86::R11D : X86::R12D;
2732   }
2733
2734   bool IsNested = HasNestArgument(&MF);
2735
2736   if (CallingConvention == CallingConv::X86_FastCall ||
2737       CallingConvention == CallingConv::Fast ||
2738       CallingConvention == CallingConv::Tail) {
2739     if (IsNested)
2740       report_fatal_error("Segmented stacks do not support fastcall with "
2741                          "nested functions.");
2742     return Primary ? X86::EAX : X86::ECX;
2743   }
2744   if (IsNested)
2745     return Primary ? X86::EDX : X86::EAX;
2746   return Primary ? X86::ECX : X86::EAX;
2747 }
2748
2749 // The stack limit in the TCB is set to this many bytes above the actual stack
2750 // limit.
2751 static const uint64_t kSplitStackAvailable = 256;
2752
2753 void X86FrameLowering::adjustForSegmentedStacks(
2754     MachineFunction &MF, MachineBasicBlock &PrologueMBB) const {
2755   MachineFrameInfo &MFI = MF.getFrameInfo();
2756   uint64_t StackSize;
2757   unsigned TlsReg, TlsOffset;
2758   DebugLoc DL;
2759
2760   // To support shrink-wrapping we would need to insert the new blocks
2761   // at the right place and update the branches to PrologueMBB.
2762   assert(&(*MF.begin()) == &PrologueMBB && "Shrink-wrapping not supported yet");
2763
2764   unsigned ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
2765   assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
2766          "Scratch register is live-in");
2767
2768   if (MF.getFunction().isVarArg())
2769     report_fatal_error("Segmented stacks do not support vararg functions.");
2770   if (!STI.isTargetLinux() && !STI.isTargetDarwin() && !STI.isTargetWin32() &&
2771       !STI.isTargetWin64() && !STI.isTargetFreeBSD() &&
2772       !STI.isTargetDragonFly())
2773     report_fatal_error("Segmented stacks not supported on this platform.");
2774
2775   // Eventually StackSize will be calculated by a link-time pass, which will
2776   // also decide whether checking code needs to be injected into this particular
2777   // prologue.
2778   StackSize = MFI.getStackSize();
2779
2780   // Do not generate a prologue for leaf functions with a stack of size zero.
2781   // For non-leaf functions we have to allow for the possibility that the
2782   // call is to a non-split function, as in PR37807. This function could also
2783   // take the address of a non-split function. When the linker tries to adjust
2784   // its non-existent prologue, it would fail with an error. Mark the object
2785   // file so that such failures are not errors. See this Go language bug report:
2786   // https://go-review.googlesource.com/c/go/+/148819/
2787   if (StackSize == 0 && !MFI.hasTailCall()) {
2788     MF.getMMI().setHasNosplitStack(true);
2789     return;
2790   }
2791
2792   MachineBasicBlock *allocMBB = MF.CreateMachineBasicBlock();
2793   MachineBasicBlock *checkMBB = MF.CreateMachineBasicBlock();
2794   X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
2795   bool IsNested = false;
2796
2797   // We need to know if the function has a nest argument only in 64 bit mode.
2798   if (Is64Bit)
2799     IsNested = HasNestArgument(&MF);
2800
2801   // The MOV R10, RAX needs to be in a different block, since the RET we emit in
2802   // allocMBB needs to be the last (terminating) instruction.
2803
2804   for (const auto &LI : PrologueMBB.liveins()) {
2805     allocMBB->addLiveIn(LI);
2806     checkMBB->addLiveIn(LI);
2807   }
2808
2809   if (IsNested)
2810     allocMBB->addLiveIn(IsLP64 ? X86::R10 : X86::R10D);
2811
2812   MF.push_front(allocMBB);
2813   MF.push_front(checkMBB);
2814
2815   // When the frame size is less than 256 we just compare the stack
2816   // boundary directly to the value of the stack pointer, per gcc.
2817   bool CompareStackPointer = StackSize < kSplitStackAvailable;
2818
2819   // Read the limit of the current stacklet from the stack_guard location.
2820   if (Is64Bit) {
2821     if (STI.isTargetLinux()) {
2822       TlsReg = X86::FS;
2823       TlsOffset = IsLP64 ? 0x70 : 0x40;
2824     } else if (STI.isTargetDarwin()) {
2825       TlsReg = X86::GS;
2826       TlsOffset = 0x60 + 90*8; // See pthread_machdep.h. Steal TLS slot 90.
2827     } else if (STI.isTargetWin64()) {
2828       TlsReg = X86::GS;
2829       TlsOffset = 0x28; // pvArbitrary, reserved for application use
2830     } else if (STI.isTargetFreeBSD()) {
2831       TlsReg = X86::FS;
2832       TlsOffset = 0x18;
2833     } else if (STI.isTargetDragonFly()) {
2834       TlsReg = X86::FS;
2835       TlsOffset = 0x20; // use tls_tcb.tcb_segstack
2836     } else {
2837       report_fatal_error("Segmented stacks not supported on this platform.");
2838     }
2839
2840     if (CompareStackPointer)
2841       ScratchReg = IsLP64 ? X86::RSP : X86::ESP;
2842     else
2843       BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::LEA64r : X86::LEA64_32r), ScratchReg).addReg(X86::RSP)
2844           .addImm(1).addReg(0).addImm(-StackSize).addReg(0);
2845
2846     BuildMI(checkMBB, DL, TII.get(IsLP64 ?
X86::CMP64rm : X86::CMP32rm)).addReg(ScratchReg) 2847 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg); 2848 } else { 2849 if (STI.isTargetLinux()) { 2850 TlsReg = X86::GS; 2851 TlsOffset = 0x30; 2852 } else if (STI.isTargetDarwin()) { 2853 TlsReg = X86::GS; 2854 TlsOffset = 0x48 + 90*4; 2855 } else if (STI.isTargetWin32()) { 2856 TlsReg = X86::FS; 2857 TlsOffset = 0x14; // pvArbitrary, reserved for application use 2858 } else if (STI.isTargetDragonFly()) { 2859 TlsReg = X86::FS; 2860 TlsOffset = 0x10; // use tls_tcb.tcb_segstack 2861 } else if (STI.isTargetFreeBSD()) { 2862 report_fatal_error("Segmented stacks not supported on FreeBSD i386."); 2863 } else { 2864 report_fatal_error("Segmented stacks not supported on this platform."); 2865 } 2866 2867 if (CompareStackPointer) 2868 ScratchReg = X86::ESP; 2869 else 2870 BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP) 2871 .addImm(1).addReg(0).addImm(-StackSize).addReg(0); 2872 2873 if (STI.isTargetLinux() || STI.isTargetWin32() || STI.isTargetWin64() || 2874 STI.isTargetDragonFly()) { 2875 BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)).addReg(ScratchReg) 2876 .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg); 2877 } else if (STI.isTargetDarwin()) { 2878 2879 // TlsOffset doesn't fit into a mod r/m byte so we need an extra register. 2880 unsigned ScratchReg2; 2881 bool SaveScratch2; 2882 if (CompareStackPointer) { 2883 // The primary scratch register is available for holding the TLS offset. 2884 ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, true); 2885 SaveScratch2 = false; 2886 } else { 2887 // Need to use a second register to hold the TLS offset 2888 ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, false); 2889 2890 // Unfortunately, with fastcc the second scratch register may hold an 2891 // argument. 2892 SaveScratch2 = MF.getRegInfo().isLiveIn(ScratchReg2); 2893 } 2894 2895 // If Scratch2 is live-in then it needs to be saved. 2896 assert((!MF.getRegInfo().isLiveIn(ScratchReg2) || SaveScratch2) && 2897 "Scratch register is live-in and not saved"); 2898 2899 if (SaveScratch2) 2900 BuildMI(checkMBB, DL, TII.get(X86::PUSH32r)) 2901 .addReg(ScratchReg2, RegState::Kill); 2902 2903 BuildMI(checkMBB, DL, TII.get(X86::MOV32ri), ScratchReg2) 2904 .addImm(TlsOffset); 2905 BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)) 2906 .addReg(ScratchReg) 2907 .addReg(ScratchReg2).addImm(1).addReg(0) 2908 .addImm(0) 2909 .addReg(TlsReg); 2910 2911 if (SaveScratch2) 2912 BuildMI(checkMBB, DL, TII.get(X86::POP32r), ScratchReg2); 2913 } 2914 } 2915 2916 // This jump is taken if SP >= (Stacklet Limit + Stack Space required). 2917 // It jumps to normal execution of the function body. 2918 BuildMI(checkMBB, DL, TII.get(X86::JCC_1)).addMBB(&PrologueMBB).addImm(X86::COND_A); 2919 2920 // On 32 bit we first push the arguments size and then the frame size. On 64 2921 // bit, we pass the stack frame size in r10 and the argument size in r11. 2922 if (Is64Bit) { 2923 // Functions with nested arguments use R10, so it needs to be saved across 2924 // the call to _morestack 2925 2926 const unsigned RegAX = IsLP64 ? X86::RAX : X86::EAX; 2927 const unsigned Reg10 = IsLP64 ? X86::R10 : X86::R10D; 2928 const unsigned Reg11 = IsLP64 ? X86::R11 : X86::R11D; 2929 const unsigned MOVrr = IsLP64 ? X86::MOV64rr : X86::MOV32rr; 2930 const unsigned MOVri = IsLP64 ? 
                                          X86::MOV64ri : X86::MOV32ri;
2931
2932     if (IsNested)
2933       BuildMI(allocMBB, DL, TII.get(MOVrr), RegAX).addReg(Reg10);
2934
2935     BuildMI(allocMBB, DL, TII.get(MOVri), Reg10)
2936         .addImm(StackSize);
2937     BuildMI(allocMBB, DL, TII.get(MOVri), Reg11)
2938         .addImm(X86FI->getArgumentStackSize());
2939   } else {
2940     BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
2941         .addImm(X86FI->getArgumentStackSize());
2942     BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
2943         .addImm(StackSize);
2944   }
2945
2946   // __morestack is in libgcc.
2947   if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {
2948     // Under the large code model, we cannot assume that __morestack lives
2949     // within 2^31 bytes of the call site, so we cannot use pc-relative
2950     // addressing. We cannot perform the call via a temporary register,
2951     // as the rax register may be used to store the static chain, and all
2952     // other suitable registers may be either callee-save or used for
2953     // parameter passing. We cannot use the stack at this point either
2954     // because __morestack manipulates the stack directly.
2955     //
2956     // To avoid these issues, perform an indirect call via a read-only memory
2957     // location containing the address.
2958     //
2959     // This solution is not perfect, as it assumes that the .rodata section
2960     // is laid out within 2^31 bytes of each function body, but this seems
2961     // to be sufficient for JIT.
2962     // FIXME: Add retpoline support and remove the error here.
2963     if (STI.useIndirectThunkCalls())
2964       report_fatal_error("Emitting morestack calls on 64-bit with the large "
2965                          "code model and thunks not yet implemented.");
2966     BuildMI(allocMBB, DL, TII.get(X86::CALL64m))
2967         .addReg(X86::RIP)
2968         .addImm(0)
2969         .addReg(0)
2970         .addExternalSymbol("__morestack_addr")
2971         .addReg(0);
2972     MF.getMMI().setUsesMorestackAddr(true);
2973   } else {
2974     if (Is64Bit)
2975       BuildMI(allocMBB, DL, TII.get(X86::CALL64pcrel32))
2976           .addExternalSymbol("__morestack");
2977     else
2978       BuildMI(allocMBB, DL, TII.get(X86::CALLpcrel32))
2979           .addExternalSymbol("__morestack");
2980   }
2981
2982   if (IsNested)
2983     BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET_RESTORE_R10));
2984   else
2985     BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET));
2986
2987   allocMBB->addSuccessor(&PrologueMBB);
2988
2989   checkMBB->addSuccessor(allocMBB, BranchProbability::getZero());
2990   checkMBB->addSuccessor(&PrologueMBB, BranchProbability::getOne());
2991
2992 #ifdef EXPENSIVE_CHECKS
2993   MF.verify();
2994 #endif
2995 }
2996
2997 /// Lookup an ERTS parameter in the !hipe.literals named metadata node.
2998 /// HiPE provides Erlang Runtime System-internal parameters, such as PCB offsets
2999 /// to fields it needs, through a named metadata node "hipe.literals" containing
3000 /// name-value pairs.
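/// Returns the value of the requested literal; malformed entries are skipped,
/// and a fatal error is reported if the literal cannot be found.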
3001 static unsigned getHiPELiteral( 3002 NamedMDNode *HiPELiteralsMD, const StringRef LiteralName) { 3003 for (int i = 0, e = HiPELiteralsMD->getNumOperands(); i != e; ++i) { 3004 MDNode *Node = HiPELiteralsMD->getOperand(i); 3005 if (Node->getNumOperands() != 2) continue; 3006 MDString *NodeName = dyn_cast<MDString>(Node->getOperand(0)); 3007 ValueAsMetadata *NodeVal = dyn_cast<ValueAsMetadata>(Node->getOperand(1)); 3008 if (!NodeName || !NodeVal) continue; 3009 ConstantInt *ValConst = dyn_cast_or_null<ConstantInt>(NodeVal->getValue()); 3010 if (ValConst && NodeName->getString() == LiteralName) { 3011 return ValConst->getZExtValue(); 3012 } 3013 } 3014 3015 report_fatal_error("HiPE literal " + LiteralName 3016 + " required but not provided"); 3017 } 3018 3019 // Return true if there are no non-ehpad successors to MBB and there are no 3020 // non-meta instructions between MBBI and MBB.end(). 3021 static bool blockEndIsUnreachable(const MachineBasicBlock &MBB, 3022 MachineBasicBlock::const_iterator MBBI) { 3023 return llvm::all_of( 3024 MBB.successors(), 3025 [](const MachineBasicBlock *Succ) { return Succ->isEHPad(); }) && 3026 std::all_of(MBBI, MBB.end(), [](const MachineInstr &MI) { 3027 return MI.isMetaInstruction(); 3028 }); 3029 } 3030 3031 /// Erlang programs may need a special prologue to handle the stack size they 3032 /// might need at runtime. That is because Erlang/OTP does not implement a C 3033 /// stack but uses a custom implementation of hybrid stack/heap architecture. 3034 /// (for more information see Eric Stenman's Ph.D. thesis: 3035 /// http://publications.uu.se/uu/fulltext/nbn_se_uu_diva-2688.pdf) 3036 /// 3037 /// CheckStack: 3038 /// temp0 = sp - MaxStack 3039 /// if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart 3040 /// OldStart: 3041 /// ... 3042 /// IncStack: 3043 /// call inc_stack # doubles the stack space 3044 /// temp0 = sp - MaxStack 3045 /// if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart 3046 void X86FrameLowering::adjustForHiPEPrologue( 3047 MachineFunction &MF, MachineBasicBlock &PrologueMBB) const { 3048 MachineFrameInfo &MFI = MF.getFrameInfo(); 3049 DebugLoc DL; 3050 3051 // To support shrink-wrapping we would need to insert the new blocks 3052 // at the right place and update the branches to PrologueMBB. 3053 assert(&(*MF.begin()) == &PrologueMBB && "Shrink-wrapping not supported yet"); 3054 3055 // HiPE-specific values 3056 NamedMDNode *HiPELiteralsMD = MF.getMMI().getModule() 3057 ->getNamedMetadata("hipe.literals"); 3058 if (!HiPELiteralsMD) 3059 report_fatal_error( 3060 "Can't generate HiPE prologue without runtime parameters"); 3061 const unsigned HipeLeafWords 3062 = getHiPELiteral(HiPELiteralsMD, 3063 Is64Bit ? "AMD64_LEAF_WORDS" : "X86_LEAF_WORDS"); 3064 const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5; 3065 const unsigned Guaranteed = HipeLeafWords * SlotSize; 3066 unsigned CallerStkArity = MF.getFunction().arg_size() > CCRegisteredArgs ? 3067 MF.getFunction().arg_size() - CCRegisteredArgs : 0; 3068 unsigned MaxStack = MFI.getStackSize() + CallerStkArity*SlotSize + SlotSize; 3069 3070 assert(STI.isTargetLinux() && 3071 "HiPE prologue is only supported on Linux operating systems."); 3072 3073 // Compute the largest caller's frame that is needed to fit the callees' 3074 // frames. 
  //
  // a) the fixed frame size, which is the space needed for all spilled temps,
  // b) outgoing on-stack parameter areas, and
  // c) the minimum stack space this function needs to make available for the
  //    functions it calls (a tunable ABI property).
  if (MFI.hasCalls()) {
    unsigned MoreStackForCalls = 0;

    for (auto &MBB : MF) {
      for (auto &MI : MBB) {
        if (!MI.isCall())
          continue;

        // Get callee operand.
        const MachineOperand &MO = MI.getOperand(0);

        // Only take into account global function calls (no closures etc.).
        if (!MO.isGlobal())
          continue;

        const Function *F = dyn_cast<Function>(MO.getGlobal());
        if (!F)
          continue;

        // Do not update 'MaxStack' for primitive and built-in functions
        // (encoded with names either starting with "erlang."/"bif_" or not
        // having a ".", such as a simple <Module>.<Function>.<Arity>, or an
        // "_", such as the BIF "suspend_0") as they are executed on another
        // stack.
        if (F->getName().contains("erlang.") ||
            F->getName().contains("bif_") ||
            F->getName().find_first_of("._") == StringRef::npos)
          continue;

        unsigned CalleeStkArity =
            F->arg_size() > CCRegisteredArgs ? F->arg_size() - CCRegisteredArgs
                                             : 0;
        if (HipeLeafWords - 1 > CalleeStkArity)
          MoreStackForCalls =
              std::max(MoreStackForCalls,
                       (HipeLeafWords - 1 - CalleeStkArity) * SlotSize);
      }
    }
    MaxStack += MoreStackForCalls;
  }

  // If the stack frame needed is larger than the guaranteed amount, runtime
  // checks and calls to the "inc_stack_0" BIF should be inserted in the
  // assembly prologue.
  if (MaxStack > Guaranteed) {
    MachineBasicBlock *stackCheckMBB = MF.CreateMachineBasicBlock();
    MachineBasicBlock *incStackMBB = MF.CreateMachineBasicBlock();

    for (const auto &LI : PrologueMBB.liveins()) {
      stackCheckMBB->addLiveIn(LI);
      incStackMBB->addLiveIn(LI);
    }

    MF.push_front(incStackMBB);
    MF.push_front(stackCheckMBB);

    unsigned ScratchReg, SPReg, PReg, SPLimitOffset;
    unsigned LEAop, CMPop, CALLop;
    SPLimitOffset = getHiPELiteral(HiPELiteralsMD, "P_NSP_LIMIT");
    if (Is64Bit) {
      SPReg = X86::RSP;
      PReg = X86::RBP;
      LEAop = X86::LEA64r;
      CMPop = X86::CMP64rm;
      CALLop = X86::CALL64pcrel32;
    } else {
      SPReg = X86::ESP;
      PReg = X86::EBP;
      LEAop = X86::LEA32r;
      CMPop = X86::CMP32rm;
      CALLop = X86::CALLpcrel32;
    }

    ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
    assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
           "HiPE prologue scratch register is live-in");

    // Create new MBB for StackCheck:
    addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(LEAop), ScratchReg),
                 SPReg, false, -MaxStack);
    // SPLimitOffset is in a fixed heap location (pointed to by BP).
    addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(CMPop)).addReg(ScratchReg),
                 PReg, false, SPLimitOffset);
    BuildMI(stackCheckMBB, DL, TII.get(X86::JCC_1))
        .addMBB(&PrologueMBB)
        .addImm(X86::COND_AE);

    // Create new MBB for IncStack:
    BuildMI(incStackMBB, DL, TII.get(CALLop)).addExternalSymbol("inc_stack_0");
    addRegOffset(BuildMI(incStackMBB, DL, TII.get(LEAop), ScratchReg),
                 SPReg, false, -MaxStack);
    addRegOffset(BuildMI(incStackMBB, DL, TII.get(CMPop)).addReg(ScratchReg),
                 PReg, false, SPLimitOffset);
    BuildMI(incStackMBB, DL, TII.get(X86::JCC_1))
        .addMBB(incStackMBB)
        .addImm(X86::COND_LE);

    stackCheckMBB->addSuccessor(&PrologueMBB, {99, 100});
    stackCheckMBB->addSuccessor(incStackMBB, {1, 100});
    incStackMBB->addSuccessor(&PrologueMBB, {99, 100});
    incStackMBB->addSuccessor(incStackMBB, {1, 100});
  }
#ifdef EXPENSIVE_CHECKS
  MF.verify();
#endif
}
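
// Replace a stack-pointer adjustment that immediately follows a call with POPs
// into registers the call left dead. A hedged illustration (the registers
// actually chosen depend on the call's regmask), when optimizing for size on
// 32-bit:
//
//   call foo
//   addl $8, %esp      ; 3 bytes
//
// can become:
//
//   call foo
//   popl %ecx          ; 1 byte
//   popl %edx          ; 1 byte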
bool X86FrameLowering::adjustStackWithPops(MachineBasicBlock &MBB,
                                           MachineBasicBlock::iterator MBBI,
                                           const DebugLoc &DL,
                                           int Offset) const {
  if (Offset <= 0)
    return false;

  if (Offset % SlotSize)
    return false;

  int NumPops = Offset / SlotSize;
  // This is only worth it if we have at most 2 pops.
  if (NumPops != 1 && NumPops != 2)
    return false;

  // Handle only the trivial case where the adjustment directly follows
  // a call. This is the most common one, anyway.
  if (MBBI == MBB.begin())
    return false;
  MachineBasicBlock::iterator Prev = std::prev(MBBI);
  if (!Prev->isCall() || !Prev->getOperand(1).isRegMask())
    return false;

  unsigned Regs[2];
  unsigned FoundRegs = 0;

  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const MachineOperand &RegMask = Prev->getOperand(1);

  auto &RegClass =
      Is64Bit ? X86::GR64_NOREX_NOSPRegClass : X86::GR32_NOREX_NOSPRegClass;
  // Try to find up to NumPops free registers.
  for (auto Candidate : RegClass) {
    // Poor man's liveness:
    // Since we're immediately after a call, any register that is clobbered
    // by the call and not defined by it can be considered dead.
    if (!RegMask.clobbersPhysReg(Candidate))
      continue;

    // Don't clobber reserved registers.
    if (MRI.isReserved(Candidate))
      continue;

    bool IsDef = false;
    for (const MachineOperand &MO : Prev->implicit_operands()) {
      if (MO.isReg() && MO.isDef() &&
          TRI->isSuperOrSubRegisterEq(MO.getReg(), Candidate)) {
        IsDef = true;
        break;
      }
    }

    if (IsDef)
      continue;

    Regs[FoundRegs++] = Candidate;
    if (FoundRegs == (unsigned)NumPops)
      break;
  }

  if (FoundRegs == 0)
    return false;

  // If we found only one free register, but need two, reuse the same one twice.
  while (FoundRegs < (unsigned)NumPops)
    Regs[FoundRegs++] = Regs[0];

  for (int i = 0; i < NumPops; ++i)
    BuildMI(MBB, MBBI, DL, TII.get(STI.is64Bit() ? X86::POP64r : X86::POP32r),
            Regs[i]);

  return true;
}
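
// Lower the ADJCALLSTACKDOWN/ADJCALLSTACKUP call-frame pseudos into direct
// stack-pointer updates when the call frame is not reserved. A rough sketch of
// the intended effect on x86-64 (amounts illustrative; merging, alignment and
// CFI are handled below):
//
//   ADJCALLSTACKDOWN64 32, 0, 0   -->   subq $32, %rsp
//   CALL64pcrel32 @callee         -->   callq callee
//   ADJCALLSTACKUP64 32, 0        -->   addq $32, %rsp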
MachineBasicBlock::iterator X86FrameLowering::eliminateCallFramePseudoInstr(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator I) const {
  bool reserveCallFrame = hasReservedCallFrame(MF);
  unsigned Opcode = I->getOpcode();
  bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();
  DebugLoc DL = I->getDebugLoc(); // Copy the DebugLoc as I will be erased.
  uint64_t Amount = TII.getFrameSize(*I);
  uint64_t InternalAmt = (isDestroy || Amount) ? TII.getFrameAdjustment(*I) : 0;
  I = MBB.erase(I);
  auto InsertPos = skipDebugInstructionsForward(I, MBB.end());

  // Try to avoid emitting dead SP adjustments if the block end is unreachable,
  // typically because the function is marked noreturn (abort, throw,
  // assert_fail, etc.).
  if (isDestroy && blockEndIsUnreachable(MBB, I))
    return I;

  if (!reserveCallFrame) {
    // If the stack pointer can be changed after the prologue, turn the
    // adjcallstackdown instruction into a 'sub ESP, <amt>' and the
    // adjcallstackup instruction into an 'add ESP, <amt>'.

    // We need to keep the stack aligned properly. To do this, we round the
    // amount of space needed for the outgoing arguments up to the next
    // alignment boundary.
    Amount = alignTo(Amount, getStackAlign());

    const Function &F = MF.getFunction();
    bool WindowsCFI = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
    bool DwarfCFI = !WindowsCFI && MF.needsFrameMoves();

    // If we have any exception handlers in this function, and we adjust
    // the SP before calls, we may need to indicate this to the unwinder
    // using GNU_ARGS_SIZE. Note that this may be necessary even when
    // Amount == 0, because the preceding function may have set a non-zero
    // GNU_ARGS_SIZE.
    // TODO: We don't need to reset this between subsequent functions,
    // if it didn't change.
    bool HasDwarfEHHandlers = !WindowsCFI && !MF.getLandingPads().empty();

    if (HasDwarfEHHandlers && !isDestroy &&
        MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences())
      BuildCFI(MBB, InsertPos, DL,
               MCCFIInstruction::createGnuArgsSize(nullptr, Amount));

    if (Amount == 0)
      return I;

    // Factor out the amount that gets handled inside the sequence
    // (pushes of arguments for frame setup, callee pops for frame destroy).
    Amount -= InternalAmt;

    // TODO: This is needed only if we require precise CFA.
    // If this is a callee-pop calling convention, emit a CFA adjust for
    // the amount the callee popped.
    if (isDestroy && InternalAmt && DwarfCFI && !hasFP(MF))
      BuildCFI(MBB, InsertPos, DL,
               MCCFIInstruction::createAdjustCfaOffset(nullptr, -InternalAmt));

    // Add Amount to SP to destroy a frame, or subtract it to set one up.
    int64_t StackAdjustment = isDestroy ? Amount : -Amount;

    if (StackAdjustment) {
      // Merge with any previous or following adjustment instruction. Note: the
      // instructions merged with here do not have CFI, so their stack
      // adjustments do not feed into CfaAdjustment.
      StackAdjustment += mergeSPUpdates(MBB, InsertPos, true);
      StackAdjustment += mergeSPUpdates(MBB, InsertPos, false);

      if (StackAdjustment) {
        if (!(F.hasMinSize() &&
              adjustStackWithPops(MBB, InsertPos, DL, StackAdjustment)))
          BuildStackAdjustment(MBB, InsertPos, DL, StackAdjustment,
                               /*InEpilogue=*/false);
      }
    }

    if (DwarfCFI && !hasFP(MF)) {
      // If we don't have a frame pointer, but need to generate unwind
      // information, we need to set the correct CFA offset after the stack
      // adjustment. How much we adjust the CFA offset depends on whether we're
      // emitting CFI only for EH purposes or for debugging. EH only requires
      // the CFA offset to be correct at each call site, while for debugging we
      // want it to be more precise.

      int64_t CfaAdjustment = -StackAdjustment;
      // TODO: When not using precise CFA, we also need to adjust for the
      // InternalAmt here.
      if (CfaAdjustment) {
        BuildCFI(MBB, InsertPos, DL,
                 MCCFIInstruction::createAdjustCfaOffset(nullptr,
                                                         CfaAdjustment));
      }
    }

    return I;
  }

  if (InternalAmt) {
    MachineBasicBlock::iterator CI = I;
    MachineBasicBlock::iterator B = MBB.begin();
    while (CI != B && !std::prev(CI)->isCall())
      --CI;
    BuildStackAdjustment(MBB, CI, DL, -InternalAmt, /*InEpilogue=*/false);
  }

  return I;
}

bool X86FrameLowering::canUseAsPrologue(const MachineBasicBlock &MBB) const {
  assert(MBB.getParent() && "Block is not attached to a function!");
  const MachineFunction &MF = *MBB.getParent();
  if (!MBB.isLiveIn(X86::EFLAGS))
    return true;

  const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  return !TRI->hasStackRealignment(MF) && !X86FI->hasSwiftAsyncContext();
}

bool X86FrameLowering::canUseAsEpilogue(const MachineBasicBlock &MBB) const {
  assert(MBB.getParent() && "Block is not attached to a function!");

  // Win64 has strict requirements in terms of epilogue and we are
  // not taking a chance at messing with them.
  // I.e., unless this block is already an exit block, we can't use
  // it as an epilogue.
  if (STI.isTargetWin64() && !MBB.succ_empty() && !MBB.isReturnBlock())
    return false;

  // The Swift async context epilogue has a BTR instruction that clobbers
  // parts of EFLAGS.
  const MachineFunction &MF = *MBB.getParent();
  if (MF.getInfo<X86MachineFunctionInfo>()->hasSwiftAsyncContext())
    return !flagsNeedToBePreservedBeforeTheTerminators(MBB);

  if (canUseLEAForSPInEpilogue(*MBB.getParent()))
    return true;

  // If we cannot use LEA to adjust SP, we may need to use ADD, which
  // clobbers the EFLAGS. Check that we do not need to preserve EFLAGS;
  // otherwise, conservatively assume it is not safe to insert the
  // epilogue here.
  return !flagsNeedToBePreservedBeforeTheTerminators(MBB);
}

bool X86FrameLowering::enableShrinkWrapping(const MachineFunction &MF) const {
  // If we may need to emit frameless compact unwind information, give
  // up as this is currently broken: PR25614.
  bool CompactUnwind =
      MF.getMMI().getContext().getObjectFileInfo()->getCompactUnwindSection() !=
      nullptr;
  return (MF.getFunction().hasFnAttribute(Attribute::NoUnwind) || hasFP(MF) ||
          !CompactUnwind) &&
         // The lowering of segmented stacks and HiPE only supports entry
         // blocks as prologue blocks: PR26107. This limitation may be
         // lifted if we fix:
         // - adjustForSegmentedStacks
         // - adjustForHiPEPrologue
         MF.getFunction().getCallingConv() != CallingConv::HiPE &&
         !MF.shouldSplitStack();
}

MachineBasicBlock::iterator X86FrameLowering::restoreWin32EHStackPointers(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    const DebugLoc &DL, bool RestoreSP) const {
  assert(STI.isTargetWindowsMSVC() && "funclets only supported in MSVC env");
  assert(STI.isTargetWin32() && "EBP/ESI restoration only required on win32");
  assert(STI.is32Bit() && !Uses64BitFramePtr &&
         "restoring EBP/ESI on non-32-bit target");

  MachineFunction &MF = *MBB.getParent();
  Register FramePtr = TRI->getFrameRegister(MF);
  Register BasePtr = TRI->getBaseRegister();
  WinEHFuncInfo &FuncInfo = *MF.getWinEHFuncInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // FIXME: Don't set the FrameSetup flag in the catchret case.

  int FI = FuncInfo.EHRegNodeFrameIndex;
  int EHRegSize = MFI.getObjectSize(FI);

  if (RestoreSP) {
    // MOV32rm -EHRegSize(%ebp), %esp
    addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32rm), X86::ESP),
                 X86::EBP, true, -EHRegSize)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  Register UsedReg;
  int EHRegOffset = getFrameIndexReference(MF, FI, UsedReg).getFixed();
  int EndOffset = -EHRegOffset - EHRegSize;
  FuncInfo.EHRegNodeEndOffset = EndOffset;

  if (UsedReg == FramePtr) {
    // ADD $offset, %ebp
    unsigned ADDri = getADDriOpcode(false, EndOffset);
    BuildMI(MBB, MBBI, DL, TII.get(ADDri), FramePtr)
        .addReg(FramePtr)
        .addImm(EndOffset)
        .setMIFlag(MachineInstr::FrameSetup)
        ->getOperand(3)
        .setIsDead();
    assert(EndOffset >= 0 &&
           "end of registration object above normal EBP position!");
  } else if (UsedReg == BasePtr) {
    // LEA offset(%ebp), %esi
    addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::LEA32r), BasePtr),
                 FramePtr, false, EndOffset)
        .setMIFlag(MachineInstr::FrameSetup);
    // MOV32rm SavedEBPOffset(%esi), %ebp
    assert(X86FI->getHasSEHFramePtrSave());
    int Offset =
        getFrameIndexReference(MF, X86FI->getSEHFramePtrSaveIndex(), UsedReg)
            .getFixed();
    assert(UsedReg == BasePtr);
    addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32rm), FramePtr),
                 UsedReg, true, Offset)
        .setMIFlag(MachineInstr::FrameSetup);
  } else {
    llvm_unreachable("32-bit frames with WinEH must use FramePtr or BasePtr");
  }
  return MBBI;
}

int X86FrameLowering::getInitialCFAOffset(const MachineFunction &MF) const {
  return TRI->getSlotSize();
}

Register
X86FrameLowering::getInitialCFARegister(const MachineFunction &MF) const {
  return TRI->getDwarfRegNum(StackPtr, true);
}

namespace {
// Struct used by orderFrameObjects to help sort the stack objects.
struct X86FrameSortingObject {
  bool IsValid = false;             // true if we care about this Object.
  unsigned ObjectIndex = 0;         // Index of Object into MFI list.
  unsigned ObjectSize = 0;          // Size of Object in bytes.
  Align ObjectAlignment = Align(1); // Alignment of Object in bytes.
  unsigned ObjectNumUses = 0;       // Object static number of uses.
};

// The comparison function we use for std::sort to order our local
// stack symbols. The current algorithm is to use an estimated
// "density". This takes into consideration the size and number of
// uses each object has in order to roughly minimize code size.
// So, for example, an object of size 16B that is referenced 5 times
// will get higher priority than 4 4B objects referenced 1 time each.
// It's not perfect and we may be able to squeeze a few more bytes out of
// it (for example: 0(esp) requires fewer bytes, symbols allocated at the
// fringe end can have special consideration, given that their size is less
// important, etc.), but the algorithmic complexity grows too much to be
// worth the extra gains we get. This gets us pretty close.
// The final order leaves us with objects with highest priority going
// at the end of our list.
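//
// A worked example of the scaled comparison below (numbers illustrative):
// with A = {5 uses, 16 bytes} and B = {1 use, 4 bytes},
//   DensityAScaled = 5 * 4  = 20
//   DensityBScaled = 1 * 16 = 16
// so A compares greater than B and is placed later in the sorted list,
// i.e. it ends up with the higher priority.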
struct X86FrameSortingComparator {
  inline bool operator()(const X86FrameSortingObject &A,
                         const X86FrameSortingObject &B) const {
    uint64_t DensityAScaled, DensityBScaled;

    // For consistency in our comparison, all invalid objects are placed
    // at the end. This also allows us to stop walking when we hit the
    // first invalid item after it's all sorted.
    if (!A.IsValid)
      return false;
    if (!B.IsValid)
      return true;

    // The density is calculated by doing:
    //   (double)DensityA = A.ObjectNumUses / A.ObjectSize
    //   (double)DensityB = B.ObjectNumUses / B.ObjectSize
    // Since this approach may cause inconsistencies in
    // the floating point <, >, == comparisons, depending on the floating
    // point model with which the compiler was built, we're going
    // to scale both sides by multiplying with
    // A.ObjectSize * B.ObjectSize. This ends up factoring away
    // the division and, with it, the need for any floating point
    // arithmetic.
    DensityAScaled = static_cast<uint64_t>(A.ObjectNumUses) *
                     static_cast<uint64_t>(B.ObjectSize);
    DensityBScaled = static_cast<uint64_t>(B.ObjectNumUses) *
                     static_cast<uint64_t>(A.ObjectSize);

    // If the two densities are equal, prioritize highest alignment
    // objects. This allows for similar alignment objects
    // to be packed together (given the same density).
    // There's room for improvement here, also, since we can pack
    // similar alignment (different density) objects next to each
    // other to save padding. This will also require further
    // complexity/iterations, and the overall gain isn't worth it,
    // in general. Something to keep in mind, though.
    if (DensityAScaled == DensityBScaled)
      return A.ObjectAlignment < B.ObjectAlignment;

    return DensityAScaled < DensityBScaled;
  }
};
} // namespace

// Order the symbols in the local stack.
// We want to place the local stack objects in some sort of sensible order.
// The heuristic we use is to try and pack them according to static number
// of uses and size of object in order to minimize code size.
void X86FrameLowering::orderFrameObjects(
    const MachineFunction &MF, SmallVectorImpl<int> &ObjectsToAllocate) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // Don't waste time if there's nothing to do.
  if (ObjectsToAllocate.empty())
    return;

  // Create an array of all MFI objects. We won't need all of these
  // objects, but we're going to create a full array of them to make
  // it easier to index into when we're counting "uses" down below.
  // We want to be able to easily/cheaply access an object by simply
  // indexing into it, instead of having to search for it every time.
  std::vector<X86FrameSortingObject> SortingObjects(MFI.getObjectIndexEnd());

  // Walk the objects we care about and mark them as such in our working
  // struct.
  for (auto &Obj : ObjectsToAllocate) {
    SortingObjects[Obj].IsValid = true;
    SortingObjects[Obj].ObjectIndex = Obj;
    SortingObjects[Obj].ObjectAlignment = MFI.getObjectAlign(Obj);
    // Set the size.
    int ObjectSize = MFI.getObjectSize(Obj);
    if (ObjectSize == 0)
      // Variable size. Just use 4.
      SortingObjects[Obj].ObjectSize = 4;
    else
      SortingObjects[Obj].ObjectSize = ObjectSize;
  }

  // Count the number of uses for each object.
  for (auto &MBB : MF) {
    for (auto &MI : MBB) {
      if (MI.isDebugInstr())
        continue;
      for (const MachineOperand &MO : MI.operands()) {
        // Check to see if it's a local stack symbol.
        if (!MO.isFI())
          continue;
        int Index = MO.getIndex();
        // Check to see if it falls within our range, and is tagged
        // to require ordering.
        if (Index >= 0 && Index < MFI.getObjectIndexEnd() &&
            SortingObjects[Index].IsValid)
          SortingObjects[Index].ObjectNumUses++;
      }
    }
  }

  // Sort the objects using X86FrameSortingComparator (see its comment for
  // info).
  llvm::stable_sort(SortingObjects, X86FrameSortingComparator());

  // Now modify the original list to represent the final order that
  // we want. The order will depend on whether we're going to access them
  // from the stack pointer or the frame pointer. For SP, objects that we
  // want at smaller offsets should end up at the end of the list; for FP,
  // the order should be flipped.
  int i = 0;
  for (auto &Obj : SortingObjects) {
    // All invalid items are sorted at the end, so it's safe to stop.
    if (!Obj.IsValid)
      break;
    ObjectsToAllocate[i++] = Obj.ObjectIndex;
  }

  // Flip it if we're accessing off of the FP.
  if (!TRI->hasStackRealignment(MF) && hasFP(MF))
    std::reverse(ObjectsToAllocate.begin(), ObjectsToAllocate.end());
}

unsigned X86FrameLowering::getWinEHParentFrameOffset(
    const MachineFunction &MF) const {
  // RDX, the parent frame pointer, is homed into 16(%rsp) in the prologue.
  unsigned Offset = 16;
  // RBP is immediately pushed.
  Offset += SlotSize;
  // All callee-saved registers are then pushed.
  Offset += MF.getInfo<X86MachineFunctionInfo>()->getCalleeSavedFrameSize();
  // Every funclet allocates enough stack space for the largest outgoing call.
  Offset += getWinEHFuncletFrameSize(MF);
  return Offset;
}

void X86FrameLowering::processFunctionBeforeFrameFinalized(
    MachineFunction &MF, RegScavenger *RS) const {
  // Mark the function as not having WinCFI. We will set it back to true in
  // emitPrologue if it gets called and emits CFI.
  MF.setHasWinCFI(false);

  // If we are using Windows x64 CFI, ensure that the stack is always 8-byte
  // aligned. The format doesn't support misaligned stack adjustments.
  if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI())
    MF.getFrameInfo().ensureMaxAlignment(Align(SlotSize));

  // If this function isn't doing Win64-style C++ EH, we don't need to do
  // anything.
  if (STI.is64Bit() && MF.hasEHFunclets() &&
      classifyEHPersonality(MF.getFunction().getPersonalityFn()) ==
          EHPersonality::MSVC_CXX) {
    adjustFrameForMsvcCxxEh(MF);
  }
}
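
// A sketch of the net effect (offsets illustrative): the UnwindHelp slot is
// carved out just below the lowest fixed object, 8-byte aligned, and the code
// below arranges for the entry block to initialize it, roughly
//
//   movq $-2, <UnwindHelp offset>(%rsp)
//
// once frame indices have been resolved.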
void X86FrameLowering::adjustFrameForMsvcCxxEh(MachineFunction &MF) const {
  // Win64 C++ EH needs to allocate the UnwindHelp object at some fixed offset
  // relative to RSP after the prologue. Find the offset of the last fixed
  // object, so that we can allocate a slot immediately following it. If there
  // were no fixed objects, use offset -SlotSize, which is immediately after
  // the return address. Fixed objects have negative frame indices.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  WinEHFuncInfo &EHInfo = *MF.getWinEHFuncInfo();
  int64_t MinFixedObjOffset = -SlotSize;
  for (int I = MFI.getObjectIndexBegin(); I < 0; ++I)
    MinFixedObjOffset = std::min(MinFixedObjOffset, MFI.getObjectOffset(I));

  for (WinEHTryBlockMapEntry &TBME : EHInfo.TryBlockMap) {
    for (WinEHHandlerType &H : TBME.HandlerArray) {
      int FrameIndex = H.CatchObj.FrameIndex;
      if (FrameIndex != INT_MAX) {
        // Ensure alignment.
        unsigned Align = MFI.getObjectAlign(FrameIndex).value();
        MinFixedObjOffset -= std::abs(MinFixedObjOffset) % Align;
        MinFixedObjOffset -= MFI.getObjectSize(FrameIndex);
        MFI.setObjectOffset(FrameIndex, MinFixedObjOffset);
      }
    }
  }

  // Ensure alignment.
  MinFixedObjOffset -= std::abs(MinFixedObjOffset) % 8;
  int64_t UnwindHelpOffset = MinFixedObjOffset - SlotSize;
  int UnwindHelpFI =
      MFI.CreateFixedObject(SlotSize, UnwindHelpOffset, /*IsImmutable=*/false);
  EHInfo.UnwindHelpFrameIdx = UnwindHelpFI;

  // Store -2 into UnwindHelp on function entry. We have to scan forwards past
  // other frame setup instructions.
  MachineBasicBlock &MBB = MF.front();
  auto MBBI = MBB.begin();
  while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup))
    ++MBBI;

  DebugLoc DL = MBB.findDebugLoc(MBBI);
  addFrameReference(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mi32)),
                    UnwindHelpFI)
      .addImm(-2);
}

void X86FrameLowering::processFunctionBeforeFrameIndicesReplaced(
    MachineFunction &MF, RegScavenger *RS) const {
  if (STI.is32Bit() && MF.hasEHFunclets())
    restoreWinEHStackPointersInParent(MF);
}

void X86FrameLowering::restoreWinEHStackPointersInParent(
    MachineFunction &MF) const {
  // 32-bit functions have to restore stack pointers when control is
  // transferred back to the parent function. These blocks are identified as
  // eh pads that are not funclet entries.
  bool IsSEH = isAsynchronousEHPersonality(
      classifyEHPersonality(MF.getFunction().getPersonalityFn()));
  for (MachineBasicBlock &MBB : MF) {
    bool NeedsRestore = MBB.isEHPad() && !MBB.isEHFuncletEntry();
    if (NeedsRestore)
      restoreWin32EHStackPointers(MBB, MBB.begin(), DebugLoc(),
                                  /*RestoreSP=*/IsSEH);
  }
}