//===-- X86FrameLowering.cpp - X86 Frame Information ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of TargetFrameLowering class.
//
//===----------------------------------------------------------------------===//

#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetOptions.h"
#include <cstdlib>

#define DEBUG_TYPE "x86-fl"

STATISTIC(NumFrameLoopProbe, "Number of loop stack probes used in prologue");
STATISTIC(NumFrameExtraProbe,
          "Number of extra stack probes generated in prologue");

using namespace llvm;

X86FrameLowering::X86FrameLowering(const X86Subtarget &STI,
                                   MaybeAlign StackAlignOverride)
    : TargetFrameLowering(StackGrowsDown, StackAlignOverride.valueOrOne(),
                          STI.is64Bit() ? -8 : -4),
      STI(STI), TII(*STI.getInstrInfo()), TRI(STI.getRegisterInfo()) {
  // Cache a bunch of frame-related predicates for this subtarget.
  SlotSize = TRI->getSlotSize();
  Is64Bit = STI.is64Bit();
  IsLP64 = STI.isTarget64BitLP64();
  // Standard x86-64 and NaCl use 64-bit frame/stack pointers; x32 uses 32-bit.
  Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64();
  StackPtr = TRI->getStackRegister();
}

bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  return !MF.getFrameInfo().hasVarSizedObjects() &&
         !MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences() &&
         !MF.getInfo<X86MachineFunctionInfo>()->hasPreallocatedCall();
}

/// canSimplifyCallFramePseudos - If there is a reserved call frame, the
/// call frame pseudos can be simplified. Having a FP, as in the default
/// implementation, is not sufficient here since we can't always use it.
/// Use a more nuanced condition.
bool
X86FrameLowering::canSimplifyCallFramePseudos(const MachineFunction &MF) const {
  return hasReservedCallFrame(MF) ||
         MF.getInfo<X86MachineFunctionInfo>()->hasPreallocatedCall() ||
         (hasFP(MF) && !TRI->hasStackRealignment(MF)) ||
         TRI->hasBasePointer(MF);
}
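
// Illustrative (hedged) example of what simplifying the call frame pseudos
// buys us: with a reserved call frame, a MIR sequence such as
//   ADJCALLSTACKDOWN64 24, 0, 0
//   ...call...
//   ADJCALLSTACKUP64 24, 0
// can fold away entirely, because the 24 bytes of outgoing-argument space
// are already part of the fixed frame the prologue allocates.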
// needsFrameIndexResolution - Do we need to perform FI resolution for
// this function. Normally, this is required only when the function
// has any stack objects. However, FI resolution actually has another job,
// not apparent from the title - it resolves callframesetup/destroy
// that were not simplified earlier.
// So, this is required for x86 functions that have push sequences even
// when there are no stack objects.
bool
X86FrameLowering::needsFrameIndexResolution(const MachineFunction &MF) const {
  return MF.getFrameInfo().hasStackObjects() ||
         MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences();
}

/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register. This is true if the function has variable sized allocas
/// or if frame pointer elimination is disabled.
bool X86FrameLowering::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
          TRI->hasStackRealignment(MF) || MFI.hasVarSizedObjects() ||
          MFI.isFrameAddressTaken() || MFI.hasOpaqueSPAdjustment() ||
          MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
          MF.getInfo<X86MachineFunctionInfo>()->hasPreallocatedCall() ||
          MF.callsUnwindInit() || MF.hasEHFunclets() || MF.callsEHReturn() ||
          MFI.hasStackMap() || MFI.hasPatchPoint() ||
          MFI.hasCopyImplyingStackAdjustment());
}

static unsigned getSUBriOpcode(bool IsLP64, int64_t Imm) {
  if (IsLP64) {
    if (isInt<8>(Imm))
      return X86::SUB64ri8;
    return X86::SUB64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::SUB32ri8;
    return X86::SUB32ri;
  }
}

static unsigned getADDriOpcode(bool IsLP64, int64_t Imm) {
  if (IsLP64) {
    if (isInt<8>(Imm))
      return X86::ADD64ri8;
    return X86::ADD64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::ADD32ri8;
    return X86::ADD32ri;
  }
}

static unsigned getSUBrrOpcode(bool IsLP64) {
  return IsLP64 ? X86::SUB64rr : X86::SUB32rr;
}

static unsigned getADDrrOpcode(bool IsLP64) {
  return IsLP64 ? X86::ADD64rr : X86::ADD32rr;
}

static unsigned getANDriOpcode(bool IsLP64, int64_t Imm) {
  if (IsLP64) {
    if (isInt<8>(Imm))
      return X86::AND64ri8;
    return X86::AND64ri32;
  }
  if (isInt<8>(Imm))
    return X86::AND32ri8;
  return X86::AND32ri;
}

static unsigned getLEArOpcode(bool IsLP64) {
  return IsLP64 ? X86::LEA64r : X86::LEA32r;
}

static bool isEAXLiveIn(MachineBasicBlock &MBB) {
  for (MachineBasicBlock::RegisterMaskPair RegMask : MBB.liveins()) {
    unsigned Reg = RegMask.PhysReg;

    if (Reg == X86::RAX || Reg == X86::EAX || Reg == X86::AX ||
        Reg == X86::AH || Reg == X86::AL)
      return true;
  }

  return false;
}

/// Check if the flags need to be preserved before the terminators.
/// This would be the case if EFLAGS is live-in of the region composed by
/// the terminators, or live-out of that region without being defined by
/// a terminator.
static bool
flagsNeedToBePreservedBeforeTheTerminators(const MachineBasicBlock &MBB) {
  for (const MachineInstr &MI : MBB.terminators()) {
    bool BreakNext = false;
    for (const MachineOperand &MO : MI.operands()) {
      if (!MO.isReg())
        continue;
      Register Reg = MO.getReg();
      if (Reg != X86::EFLAGS)
        continue;

      // This terminator needs an EFLAGS value that is not defined by a
      // previous terminator:
      // EFLAGS is live-in of the region composed by the terminators.
      if (!MO.isDef())
        return true;
      // This terminator defines the eflags, i.e., we don't need to preserve
      // it. However, we still need to check that this specific terminator
      // does not read a live-in value.
      BreakNext = true;
    }
    // We found a definition of the eflags, no need to preserve them.
    if (BreakNext)
      return false;
  }

  // None of the terminators use or define the eflags.
  // Check if they are live-out, that would imply we need to preserve them.
  for (const MachineBasicBlock *Succ : MBB.successors())
    if (Succ->isLiveIn(X86::EFLAGS))
      return true;

  return false;
}
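
// Illustrative (hedged): a block ending in the terminator pair
//   JCC_1 %bb.then, 4, implicit $eflags
//   JMP_1 %bb.else
// reads EFLAGS without defining it, so any stack adjustment inserted before
// the terminators must not clobber the flags.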
/// emitSPUpdate - Emit a series of instructions to increment / decrement the
/// stack pointer by a constant value.
void X86FrameLowering::emitSPUpdate(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator &MBBI,
                                    const DebugLoc &DL,
                                    int64_t NumBytes, bool InEpilogue) const {
  bool isSub = NumBytes < 0;
  uint64_t Offset = isSub ? -NumBytes : NumBytes;
  MachineInstr::MIFlag Flag =
      isSub ? MachineInstr::FrameSetup : MachineInstr::FrameDestroy;

  uint64_t Chunk = (1LL << 31) - 1;

  MachineFunction &MF = *MBB.getParent();
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  const X86TargetLowering &TLI = *STI.getTargetLowering();
  const bool EmitInlineStackProbe = TLI.hasInlineStackProbe(MF);

  // It's ok to not take into account large chunks when probing, as the
  // allocation is split into smaller chunks anyway.
  if (EmitInlineStackProbe && !InEpilogue) {

    // This pseudo-instruction is going to be expanded, potentially using a
    // loop, by inlineStackProbe().
    BuildMI(MBB, MBBI, DL, TII.get(X86::STACKALLOC_W_PROBING)).addImm(Offset);
    return;
  } else if (Offset > Chunk) {
    // Rather than emit a long series of instructions for large offsets,
    // load the offset into a register and do one sub/add.
    unsigned Reg = 0;
    unsigned Rax = (unsigned)(Is64Bit ? X86::RAX : X86::EAX);

    if (isSub && !isEAXLiveIn(MBB))
      Reg = Rax;
    else
      Reg = TRI->findDeadCallerSavedReg(MBB, MBBI);

    unsigned MovRIOpc = Is64Bit ? X86::MOV64ri : X86::MOV32ri;
    unsigned AddSubRROpc =
        isSub ? getSUBrrOpcode(Is64Bit) : getADDrrOpcode(Is64Bit);
    if (Reg) {
      BuildMI(MBB, MBBI, DL, TII.get(MovRIOpc), Reg)
          .addImm(Offset)
          .setMIFlag(Flag);
      MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(AddSubRROpc), StackPtr)
                             .addReg(StackPtr)
                             .addReg(Reg);
      MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
      return;
    } else if (Offset > 8 * Chunk) {
      // If we would need more than 8 add or sub instructions (a >16GB stack
      // frame), it's worth spilling RAX to materialize this immediate.
      //   pushq %rax
      //   movabsq +-$Offset+-SlotSize, %rax
      //   addq %rsp, %rax
      //   xchg %rax, (%rsp)
      //   movq (%rsp), %rsp
      assert(Is64Bit && "can't have 32-bit 16GB stack frame");
      BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64r))
          .addReg(Rax, RegState::Kill)
          .setMIFlag(Flag);
      // Subtract is not commutative, so negate the offset and always use add.
      // Subtract 8 less and add 8 more to account for the PUSH we just did.
      if (isSub)
        Offset = -(Offset - SlotSize);
      else
        Offset = Offset + SlotSize;
      BuildMI(MBB, MBBI, DL, TII.get(MovRIOpc), Rax)
          .addImm(Offset)
          .setMIFlag(Flag);
      MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(X86::ADD64rr), Rax)
                             .addReg(Rax)
                             .addReg(StackPtr);
      MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
      // Exchange the new SP in RAX with the top of the stack.
      addRegOffset(
          BuildMI(MBB, MBBI, DL, TII.get(X86::XCHG64rm), Rax).addReg(Rax),
          StackPtr, false, 0);
      // Load new SP from the top of the stack into RSP.
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rm), StackPtr),
                   StackPtr, false, 0);
      return;
    }
  }

  while (Offset) {
    uint64_t ThisVal = std::min(Offset, Chunk);
    if (ThisVal == SlotSize) {
      // Use push / pop for slot sized adjustments as a size optimization. We
      // need to find a dead register when using pop.
      unsigned Reg = isSub
                         ? (unsigned)(Is64Bit ? X86::RAX : X86::EAX)
                         : TRI->findDeadCallerSavedReg(MBB, MBBI);
      if (Reg) {
        unsigned Opc = isSub
                           ? (Is64Bit ? X86::PUSH64r : X86::PUSH32r)
                           : (Is64Bit ? X86::POP64r : X86::POP32r);
        BuildMI(MBB, MBBI, DL, TII.get(Opc))
            .addReg(Reg, getDefRegState(!isSub) | getUndefRegState(isSub))
            .setMIFlag(Flag);
        Offset -= ThisVal;
        continue;
      }
    }

    BuildStackAdjustment(MBB, MBBI, DL, isSub ? -ThisVal : ThisVal, InEpilogue)
        .setMIFlag(Flag);

    Offset -= ThisVal;
  }
}
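
// A hedged sketch of why the LEA form below matters: adjusting SP with
//   leaq -24(%rsp), %rsp
// leaves EFLAGS untouched, whereas the equivalent
//   subq $24, %rsp
// redefines the flags, which is unsafe when a live flags value must survive
// the adjustment (e.g. ahead of a conditional branch in the epilogue).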
MachineInstrBuilder X86FrameLowering::BuildStackAdjustment(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    const DebugLoc &DL, int64_t Offset, bool InEpilogue) const {
  assert(Offset != 0 && "zero offset stack adjustment requested");

  // On Atom, using LEA to adjust SP is preferred, but using it in the epilogue
  // is tricky.
  bool UseLEA;
  if (!InEpilogue) {
    // Check if inserting the prologue at the beginning
    // of MBB would require using LEA operations.
    // We need to use LEA operations if EFLAGS is live-in, because
    // it means an instruction will read it before it gets defined.
    UseLEA = STI.useLeaForSP() || MBB.isLiveIn(X86::EFLAGS);
  } else {
    // If we can use LEA for SP but we shouldn't, check that none
    // of the terminators uses the eflags. Otherwise we will insert
    // an ADD that will redefine the eflags and break the condition.
    // Alternatively, we could move the ADD, but this may not be possible
    // and is an optimization anyway.
    UseLEA = canUseLEAForSPInEpilogue(*MBB.getParent());
    if (UseLEA && !STI.useLeaForSP())
      UseLEA = flagsNeedToBePreservedBeforeTheTerminators(MBB);
    // If that assert breaks, that means we do not do the right thing
    // in canUseAsEpilogue.
    assert((UseLEA || !flagsNeedToBePreservedBeforeTheTerminators(MBB)) &&
           "We shouldn't have allowed this insertion point");
  }

  MachineInstrBuilder MI;
  if (UseLEA) {
    MI = addRegOffset(BuildMI(MBB, MBBI, DL,
                              TII.get(getLEArOpcode(Uses64BitFramePtr)),
                              StackPtr),
                      StackPtr, false, Offset);
  } else {
    bool IsSub = Offset < 0;
    uint64_t AbsOffset = IsSub ? -Offset : Offset;
    const unsigned Opc = IsSub ? getSUBriOpcode(Uses64BitFramePtr, AbsOffset)
                               : getADDriOpcode(Uses64BitFramePtr, AbsOffset);
    MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
             .addReg(StackPtr)
             .addImm(AbsOffset);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
  }
  return MI;
}
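
// Illustrative (hedged) effect of the merge performed below: if the block
// already ends with
//   subq $16, %rsp
// and the prologue is about to allocate another 32 bytes, mergeSPUpdates
// returns -16 and erases the SUB, so the caller emits a single
//   subq $48, %rsp
// instead of two separate adjustments.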
int X86FrameLowering::mergeSPUpdates(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator &MBBI,
                                     bool doMergeWithPrevious) const {
  if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
      (!doMergeWithPrevious && MBBI == MBB.end()))
    return 0;

  MachineBasicBlock::iterator PI =
      doMergeWithPrevious ? std::prev(MBBI) : MBBI;

  PI = skipDebugInstructionsBackward(PI, MBB.begin());
  // It is assumed that an ADD/SUB/LEA instruction is succeeded by one CFI
  // instruction, and that there are no DBG_VALUE or other instructions
  // between the ADD/SUB/LEA and its corresponding CFI instruction.
  /* TODO: Add support for the case where there are multiple CFI instructions
    below the ADD/SUB/LEA, e.g.:
    ...
    add
    cfi_def_cfa_offset
    cfi_offset
    ...
  */
  if (doMergeWithPrevious && PI != MBB.begin() && PI->isCFIInstruction())
    PI = std::prev(PI);

  unsigned Opc = PI->getOpcode();
  int Offset = 0;

  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      PI->getOperand(0).getReg() == StackPtr) {
    assert(PI->getOperand(1).getReg() == StackPtr);
    Offset = PI->getOperand(2).getImm();
  } else if ((Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
             PI->getOperand(0).getReg() == StackPtr &&
             PI->getOperand(1).getReg() == StackPtr &&
             PI->getOperand(2).getImm() == 1 &&
             PI->getOperand(3).getReg() == X86::NoRegister &&
             PI->getOperand(5).getReg() == X86::NoRegister) {
    // For LEAs we have: def = lea SP, FI, noreg, Offset, noreg.
    Offset = PI->getOperand(4).getImm();
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    assert(PI->getOperand(1).getReg() == StackPtr);
    Offset = -PI->getOperand(2).getImm();
  } else
    return 0;

  PI = MBB.erase(PI);
  if (PI != MBB.end() && PI->isCFIInstruction()) {
    auto CIs = MBB.getParent()->getFrameInstructions();
    MCCFIInstruction CI = CIs[PI->getOperand(0).getCFIIndex()];
    if (CI.getOperation() == MCCFIInstruction::OpDefCfaOffset ||
        CI.getOperation() == MCCFIInstruction::OpAdjustCfaOffset)
      PI = MBB.erase(PI);
  }
  if (!doMergeWithPrevious)
    MBBI = skipDebugInstructionsForward(PI, MBB.end());

  return Offset;
}

void X86FrameLowering::BuildCFI(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MBBI,
                                const DebugLoc &DL,
                                const MCCFIInstruction &CFIInst) const {
  MachineFunction &MF = *MBB.getParent();
  unsigned CFIIndex = MF.addFrameInst(CFIInst);
  BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);
}

/// Emits Dwarf Info specifying offsets of callee saved registers and
/// frame pointer. This is called only when basic block sections are enabled.
void X86FrameLowering::emitCalleeSavedFrameMoves(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
  MachineFunction &MF = *MBB.getParent();
  if (!hasFP(MF)) {
    emitCalleeSavedFrameMoves(MBB, MBBI, DebugLoc{}, true);
    return;
  }
  const MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
  const Register FramePtr = TRI->getFrameRegister(MF);
  const Register MachineFramePtr =
      STI.isTarget64BitILP32() ? Register(getX86SubSuperRegister(FramePtr, 64))
                               : FramePtr;
  unsigned DwarfReg = MRI->getDwarfRegNum(MachineFramePtr, true);
  // Offset = space for return address + size of the frame pointer itself.
  unsigned Offset = (Is64Bit ? 8 : 4) + (Uses64BitFramePtr ? 8 : 4);
  BuildCFI(MBB, MBBI, DebugLoc{},
           MCCFIInstruction::createOffset(nullptr, DwarfReg, -Offset));
  emitCalleeSavedFrameMoves(MBB, MBBI, DebugLoc{}, true);
}

void X86FrameLowering::emitCalleeSavedFrameMoves(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    const DebugLoc &DL, bool IsPrologue) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();

  // Add callee saved registers to move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();

  // Calculate offsets.
  for (const CalleeSavedInfo &I : CSI) {
    int64_t Offset = MFI.getObjectOffset(I.getFrameIdx());
    unsigned Reg = I.getReg();
    unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);

    if (IsPrologue) {
      BuildCFI(MBB, MBBI, DL,
               MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset));
    } else {
      BuildCFI(MBB, MBBI, DL,
               MCCFIInstruction::createRestore(nullptr, DwarfReg));
    }
  }
}

void X86FrameLowering::emitStackProbe(MachineFunction &MF,
                                      MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MBBI,
                                      const DebugLoc &DL, bool InProlog) const {
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  if (STI.isTargetWindowsCoreCLR()) {
    if (InProlog) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::STACKALLOC_W_PROBING))
          .addImm(0 /* no explicit stack size */);
    } else {
      emitStackProbeInline(MF, MBB, MBBI, DL, false);
    }
  } else {
    emitStackProbeCall(MF, MBB, MBBI, DL, InProlog);
  }
}

void X86FrameLowering::inlineStackProbe(MachineFunction &MF,
                                        MachineBasicBlock &PrologMBB) const {
  auto Where = llvm::find_if(PrologMBB, [](MachineInstr &MI) {
    return MI.getOpcode() == X86::STACKALLOC_W_PROBING;
  });
  if (Where != PrologMBB.end()) {
    DebugLoc DL = PrologMBB.findDebugLoc(Where);
    emitStackProbeInline(MF, PrologMBB, Where, DL, true);
    Where->eraseFromParent();
  }
}

void X86FrameLowering::emitStackProbeInline(MachineFunction &MF,
                                            MachineBasicBlock &MBB,
                                            MachineBasicBlock::iterator MBBI,
                                            const DebugLoc &DL,
                                            bool InProlog) const {
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  if (STI.isTargetWindowsCoreCLR() && STI.is64Bit())
    emitStackProbeInlineWindowsCoreCLR64(MF, MBB, MBBI, DL, InProlog);
  else
    emitStackProbeInlineGeneric(MF, MBB, MBBI, DL, InProlog);
}

void X86FrameLowering::emitStackProbeInlineGeneric(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog) const {
  MachineInstr &AllocWithProbe = *MBBI;
  uint64_t Offset = AllocWithProbe.getOperand(0).getImm();

  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  const X86TargetLowering &TLI = *STI.getTargetLowering();
  assert(!(STI.is64Bit() && STI.isTargetWindowsCoreCLR()) &&
         "different expansion expected for CoreCLR 64 bit");

  const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);
  uint64_t ProbeChunk = StackProbeSize * 8;

  uint64_t MaxAlign =
      TRI->hasStackRealignment(MF) ? calculateMaxStackAlign(MF) : 0;

  // Synthesize a loop or unroll it, depending on the number of iterations.
  // BuildStackAlignAND ensures that only MaxAlign % StackProbeSize bytes are
  // left between the unaligned rsp and the current rsp.
  if (Offset > ProbeChunk) {
    emitStackProbeInlineGenericLoop(MF, MBB, MBBI, DL, Offset,
                                    MaxAlign % StackProbeSize);
  } else {
    emitStackProbeInlineGenericBlock(MF, MBB, MBBI, DL, Offset,
                                     MaxAlign % StackProbeSize);
  }
}
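
// A hedged sketch of the unrolled expansion below, assuming Offset = 0x2800,
// AlignOffset = 0, and StackProbeSize = 0x1000:
//   subq $0x1000, %rsp
//   movq $0, (%rsp)
//   subq $0x1000, %rsp
//   movq $0, (%rsp)
//   subq $0x800, %rsp        ; tail smaller than a page, no probe needed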
void X86FrameLowering::emitStackProbeInlineGenericBlock(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL, uint64_t Offset,
    uint64_t AlignOffset) const {

  const bool NeedsDwarfCFI = needsDwarfCFI(MF);
  const bool HasFP = hasFP(MF);
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  const X86TargetLowering &TLI = *STI.getTargetLowering();
  const unsigned Opc = getSUBriOpcode(Uses64BitFramePtr, Offset);
  const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;
  const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);

  uint64_t CurrentOffset = 0;

  assert(AlignOffset < StackProbeSize);

  // If the offset is so small it fits within a page, there's nothing to do.
  if (StackProbeSize < Offset + AlignOffset) {

    MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
                           .addReg(StackPtr)
                           .addImm(StackProbeSize - AlignOffset)
                           .setMIFlag(MachineInstr::FrameSetup);
    if (!HasFP && NeedsDwarfCFI) {
      BuildCFI(MBB, MBBI, DL,
               MCCFIInstruction::createAdjustCfaOffset(
                   nullptr, StackProbeSize - AlignOffset));
    }
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.

    addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MovMIOpc))
                     .setMIFlag(MachineInstr::FrameSetup),
                 StackPtr, false, 0)
        .addImm(0)
        .setMIFlag(MachineInstr::FrameSetup);
    NumFrameExtraProbe++;
    CurrentOffset = StackProbeSize - AlignOffset;
  }

  // For the next N - 1 pages, just probe. I tried to take advantage of
  // natural probes but it implies much more logic and there were very few
  // interesting natural probes to interleave.
  while (CurrentOffset + StackProbeSize < Offset) {
    MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
                           .addReg(StackPtr)
                           .addImm(StackProbeSize)
                           .setMIFlag(MachineInstr::FrameSetup);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.

    if (!HasFP && NeedsDwarfCFI) {
      BuildCFI(
          MBB, MBBI, DL,
          MCCFIInstruction::createAdjustCfaOffset(nullptr, StackProbeSize));
    }
    addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MovMIOpc))
                     .setMIFlag(MachineInstr::FrameSetup),
                 StackPtr, false, 0)
        .addImm(0)
        .setMIFlag(MachineInstr::FrameSetup);
    NumFrameExtraProbe++;
    CurrentOffset += StackProbeSize;
  }

  // No need to probe the tail, it is smaller than a page.
  uint64_t ChunkSize = Offset - CurrentOffset;
  MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
                         .addReg(StackPtr)
                         .addImm(ChunkSize)
                         .setMIFlag(MachineInstr::FrameSetup);
  // No need to adjust the Dwarf CFA offset here; the final position of the
  // stack pointer has already been defined.
  MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
}
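
// A hedged sketch of the loop expansion built below, for a large allocation
// (Offset = N pages + Tail, StackProbeSize = 0x1000, 64-bit):
//   movq %rsp, %r11
//   subq $(N * 0x1000), %r11   ; loop bound
// loop:
//   subq $0x1000, %rsp         ; allocate a page
//   movq $0, (%rsp)            ; touch it
//   cmpq %r11, %rsp
//   jne  loop
//   subq $Tail, %rsp           ; unprobed tail, smaller than a page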
void X86FrameLowering::emitStackProbeInlineGenericLoop(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL, uint64_t Offset,
    uint64_t AlignOffset) const {
  assert(Offset && "null offset");

  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  const X86TargetLowering &TLI = *STI.getTargetLowering();
  const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;
  const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);

  if (AlignOffset) {
    if (AlignOffset < StackProbeSize) {
      // Perform a first smaller allocation followed by a probe.
      const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr, AlignOffset);
      MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(SUBOpc), StackPtr)
                             .addReg(StackPtr)
                             .addImm(AlignOffset)
                             .setMIFlag(MachineInstr::FrameSetup);
      MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.

      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MovMIOpc))
                       .setMIFlag(MachineInstr::FrameSetup),
                   StackPtr, false, 0)
          .addImm(0)
          .setMIFlag(MachineInstr::FrameSetup);
      NumFrameExtraProbe++;
      Offset -= AlignOffset;
    }
  }

  // Synthesize a loop.
  NumFrameLoopProbe++;
  const BasicBlock *LLVM_BB = MBB.getBasicBlock();

  MachineBasicBlock *testMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *tailMBB = MF.CreateMachineBasicBlock(LLVM_BB);

  MachineFunction::iterator MBBIter = ++MBB.getIterator();
  MF.insert(MBBIter, testMBB);
  MF.insert(MBBIter, tailMBB);

  Register FinalStackProbed = Uses64BitFramePtr ? X86::R11
                              : Is64Bit         ? X86::R11D
                                                : X86::EAX;
  BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::COPY), FinalStackProbed)
      .addReg(StackPtr)
      .setMIFlag(MachineInstr::FrameSetup);

  // Save the loop bound.
  {
    const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr, Offset);
    BuildMI(MBB, MBBI, DL, TII.get(SUBOpc), FinalStackProbed)
        .addReg(FinalStackProbed)
        .addImm(Offset / StackProbeSize * StackProbeSize)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // Allocate a page.
  {
    const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr, StackProbeSize);
    BuildMI(testMBB, DL, TII.get(SUBOpc), StackPtr)
        .addReg(StackPtr)
        .addImm(StackProbeSize)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // Touch the page.
  addRegOffset(BuildMI(testMBB, DL, TII.get(MovMIOpc))
                   .setMIFlag(MachineInstr::FrameSetup),
               StackPtr, false, 0)
      .addImm(0)
      .setMIFlag(MachineInstr::FrameSetup);

  // Compare with the stack pointer bound.
  BuildMI(testMBB, DL,
          TII.get(Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr))
      .addReg(StackPtr)
      .addReg(FinalStackProbed)
      .setMIFlag(MachineInstr::FrameSetup);

  // Jump.
  BuildMI(testMBB, DL, TII.get(X86::JCC_1))
      .addMBB(testMBB)
      .addImm(X86::COND_NE)
      .setMIFlag(MachineInstr::FrameSetup);
  testMBB->addSuccessor(testMBB);
  testMBB->addSuccessor(tailMBB);

  // BB management.
  tailMBB->splice(tailMBB->end(), &MBB, MBBI, MBB.end());
  tailMBB->transferSuccessorsAndUpdatePHIs(&MBB);
  MBB.addSuccessor(testMBB);

  // Handle the tail.
  unsigned TailOffset = Offset % StackProbeSize;
  if (TailOffset) {
    const unsigned Opc = getSUBriOpcode(Uses64BitFramePtr, TailOffset);
    BuildMI(*tailMBB, tailMBB->begin(), DL, TII.get(Opc), StackPtr)
        .addReg(StackPtr)
        .addImm(TailOffset)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // Update live-in information.
  recomputeLiveIns(*testMBB);
  recomputeLiveIns(*tailMBB);
}
void X86FrameLowering::emitStackProbeInlineWindowsCoreCLR64(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog) const {
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  assert(STI.is64Bit() && "different expansion needed for 32 bit");
  assert(STI.isTargetWindowsCoreCLR() && "custom expansion expects CoreCLR");
  const TargetInstrInfo &TII = *STI.getInstrInfo();
  const BasicBlock *LLVM_BB = MBB.getBasicBlock();

  // RAX contains the number of bytes of desired stack adjustment.
  // The handling here assumes this value has already been updated so as to
  // maintain stack alignment.
  //
  // We need to exit with RSP modified by this amount and execute suitable
  // page touches to notify the OS that we're growing the stack responsibly.
  // All stack probing must be done without modifying RSP.
  //
  // MBB:
  //    SizeReg = RAX;
  //    ZeroReg = 0
  //    CopyReg = RSP
  //    Flags, TestReg = CopyReg - SizeReg
  //    FinalReg = !Flags.Ovf ? TestReg : ZeroReg
  //    LimitReg = gs magic thread env access
  //    if FinalReg >= LimitReg goto ContinueMBB
  // RoundBB:
  //    RoundReg = page address of FinalReg
  // LoopMBB:
  //    LoopReg = PHI(LimitReg,ProbeReg)
  //    ProbeReg = LoopReg - PageSize
  //    [ProbeReg] = 0
  //    if (ProbeReg > RoundReg) goto LoopMBB
  // ContinueMBB:
  //    RSP = RSP - RAX
  //    [rest of original MBB]

  // Set up the new basic blocks.
  MachineBasicBlock *RoundMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *ContinueMBB = MF.CreateMachineBasicBlock(LLVM_BB);

  MachineFunction::iterator MBBIter = std::next(MBB.getIterator());
  MF.insert(MBBIter, RoundMBB);
  MF.insert(MBBIter, LoopMBB);
  MF.insert(MBBIter, ContinueMBB);

  // Split MBB and move the tail portion down to ContinueMBB.
  MachineBasicBlock::iterator BeforeMBBI = std::prev(MBBI);
  ContinueMBB->splice(ContinueMBB->begin(), &MBB, MBBI, MBB.end());
  ContinueMBB->transferSuccessorsAndUpdatePHIs(&MBB);

  // Some useful constants.
  const int64_t ThreadEnvironmentStackLimit = 0x10;
  const int64_t PageSize = 0x1000;
  const int64_t PageMask = ~(PageSize - 1);

  // Registers we need. For the normal case we use virtual
  // registers. For the prolog expansion we use RAX, RCX and RDX.
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetRegisterClass *RegClass = &X86::GR64RegClass;
  const Register SizeReg = InProlog ? X86::RAX
                                    : MRI.createVirtualRegister(RegClass),
                 ZeroReg = InProlog ? X86::RCX
                                    : MRI.createVirtualRegister(RegClass),
                 CopyReg = InProlog ? X86::RDX
                                    : MRI.createVirtualRegister(RegClass),
                 TestReg = InProlog ? X86::RDX
                                    : MRI.createVirtualRegister(RegClass),
                 FinalReg = InProlog ? X86::RDX
                                     : MRI.createVirtualRegister(RegClass),
                 RoundedReg = InProlog ? X86::RDX
                                       : MRI.createVirtualRegister(RegClass),
                 LimitReg = InProlog ? X86::RCX
                                     : MRI.createVirtualRegister(RegClass),
                 JoinReg = InProlog ? X86::RCX
                                    : MRI.createVirtualRegister(RegClass),
                 ProbeReg = InProlog ? X86::RCX
                                     : MRI.createVirtualRegister(RegClass);

  // SP-relative offsets where we can save RCX and RDX.
  int64_t RCXShadowSlot = 0;
  int64_t RDXShadowSlot = 0;

  // If inlining in the prolog, save RCX and RDX.
  if (InProlog) {
    // Compute the offsets. We need to account for things already
    // pushed onto the stack at this point: return address, frame
    // pointer (if used), and callee saves.
    X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
    const int64_t CalleeSaveSize = X86FI->getCalleeSavedFrameSize();
    const bool HasFP = hasFP(MF);

    // Check if we need to spill RCX and/or RDX.
    // Here we assume that no earlier prologue instruction changes RCX and/or
    // RDX, so checking the block live-ins is enough.
    const bool IsRCXLiveIn = MBB.isLiveIn(X86::RCX);
    const bool IsRDXLiveIn = MBB.isLiveIn(X86::RDX);
    int64_t InitSlot = 8 + CalleeSaveSize + (HasFP ? 8 : 0);
    // Assign the initial slot to both registers, then change RDX's slot if
    // both need to be spilled.
    if (IsRCXLiveIn)
      RCXShadowSlot = InitSlot;
    if (IsRDXLiveIn)
      RDXShadowSlot = InitSlot;
    if (IsRDXLiveIn && IsRCXLiveIn)
      RDXShadowSlot += 8;
    // Emit the saves if needed.
    if (IsRCXLiveIn)
      addRegOffset(BuildMI(&MBB, DL, TII.get(X86::MOV64mr)), X86::RSP, false,
                   RCXShadowSlot)
          .addReg(X86::RCX);
    if (IsRDXLiveIn)
      addRegOffset(BuildMI(&MBB, DL, TII.get(X86::MOV64mr)), X86::RSP, false,
                   RDXShadowSlot)
          .addReg(X86::RDX);
  } else {
    // Not in the prolog. Copy RAX to a virtual reg.
    BuildMI(&MBB, DL, TII.get(X86::MOV64rr), SizeReg).addReg(X86::RAX);
  }

  // Add code to MBB to check for overflow and set the new target stack pointer
  // to zero if so.
  BuildMI(&MBB, DL, TII.get(X86::XOR64rr), ZeroReg)
      .addReg(ZeroReg, RegState::Undef)
      .addReg(ZeroReg, RegState::Undef);
  BuildMI(&MBB, DL, TII.get(X86::MOV64rr), CopyReg).addReg(X86::RSP);
  BuildMI(&MBB, DL, TII.get(X86::SUB64rr), TestReg)
      .addReg(CopyReg)
      .addReg(SizeReg);
  BuildMI(&MBB, DL, TII.get(X86::CMOV64rr), FinalReg)
      .addReg(TestReg)
      .addReg(ZeroReg)
      .addImm(X86::COND_B);

  // FinalReg now holds final stack pointer value, or zero if
  // allocation would overflow. Compare against the current stack
  // limit from the thread environment block. Note this limit is the
  // lowest touched page on the stack, not the point at which the OS
  // will cause an overflow exception, so this is just an optimization
  // to avoid unnecessarily touching pages that are below the current
  // SP but already committed to the stack by the OS.
  BuildMI(&MBB, DL, TII.get(X86::MOV64rm), LimitReg)
      .addReg(0)
      .addImm(1)
      .addReg(0)
      .addImm(ThreadEnvironmentStackLimit)
      .addReg(X86::GS);
  BuildMI(&MBB, DL, TII.get(X86::CMP64rr)).addReg(FinalReg).addReg(LimitReg);
  // Jump if the desired stack pointer is at or above the stack limit.
  BuildMI(&MBB, DL, TII.get(X86::JCC_1))
      .addMBB(ContinueMBB)
      .addImm(X86::COND_AE);

  // Add code to RoundMBB to round the final stack pointer to a page boundary.
  RoundMBB->addLiveIn(FinalReg);
  BuildMI(RoundMBB, DL, TII.get(X86::AND64ri32), RoundedReg)
      .addReg(FinalReg)
      .addImm(PageMask);
  BuildMI(RoundMBB, DL, TII.get(X86::JMP_1)).addMBB(LoopMBB);

  // LimitReg now holds the current stack limit; RoundedReg holds the
  // page-rounded final RSP value. Add code to LoopMBB to decrement LimitReg
  // page-by-page and probe until we reach RoundedReg.
  if (!InProlog) {
    BuildMI(LoopMBB, DL, TII.get(X86::PHI), JoinReg)
        .addReg(LimitReg)
        .addMBB(RoundMBB)
        .addReg(ProbeReg)
        .addMBB(LoopMBB);
  }

  LoopMBB->addLiveIn(JoinReg);
  addRegOffset(BuildMI(LoopMBB, DL, TII.get(X86::LEA64r), ProbeReg), JoinReg,
               false, -PageSize);

  // Probe by storing a byte onto the stack.
  BuildMI(LoopMBB, DL, TII.get(X86::MOV8mi))
      .addReg(ProbeReg)
      .addImm(1)
      .addReg(0)
      .addImm(0)
      .addReg(0)
      .addImm(0);

  LoopMBB->addLiveIn(RoundedReg);
  BuildMI(LoopMBB, DL, TII.get(X86::CMP64rr))
      .addReg(RoundedReg)
      .addReg(ProbeReg);
  BuildMI(LoopMBB, DL, TII.get(X86::JCC_1))
      .addMBB(LoopMBB)
      .addImm(X86::COND_NE);

  MachineBasicBlock::iterator ContinueMBBI = ContinueMBB->getFirstNonPHI();

  // If in prolog, restore RDX and RCX.
  if (InProlog) {
    if (RCXShadowSlot) // It means we spilled RCX in the prologue.
      addRegOffset(BuildMI(*ContinueMBB, ContinueMBBI, DL,
                           TII.get(X86::MOV64rm), X86::RCX),
                   X86::RSP, false, RCXShadowSlot);
    if (RDXShadowSlot) // It means we spilled RDX in the prologue.
      addRegOffset(BuildMI(*ContinueMBB, ContinueMBBI, DL,
                           TII.get(X86::MOV64rm), X86::RDX),
                   X86::RSP, false, RDXShadowSlot);
  }

  // Now that the probing is done, add code to ContinueMBB to update
  // the stack pointer for real.
  ContinueMBB->addLiveIn(SizeReg);
  BuildMI(*ContinueMBB, ContinueMBBI, DL, TII.get(X86::SUB64rr), X86::RSP)
      .addReg(X86::RSP)
      .addReg(SizeReg);

  // Add the control flow edges we need.
  MBB.addSuccessor(ContinueMBB);
  MBB.addSuccessor(RoundMBB);
  RoundMBB->addSuccessor(LoopMBB);
  LoopMBB->addSuccessor(ContinueMBB);
  LoopMBB->addSuccessor(LoopMBB);

  // Mark all the instructions added to the prolog as frame setup.
  if (InProlog) {
    for (++BeforeMBBI; BeforeMBBI != MBB.end(); ++BeforeMBBI) {
      BeforeMBBI->setFlag(MachineInstr::FrameSetup);
    }
    for (MachineInstr &MI : *RoundMBB) {
      MI.setFlag(MachineInstr::FrameSetup);
    }
    for (MachineInstr &MI : *LoopMBB) {
      MI.setFlag(MachineInstr::FrameSetup);
    }
    for (MachineBasicBlock::iterator CMBBI = ContinueMBB->begin();
         CMBBI != ContinueMBBI; ++CMBBI) {
      CMBBI->setFlag(MachineInstr::FrameSetup);
    }
  }
}
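
// A hedged sketch of what the call-based probe below expands to on Win64
// (small code model; the allocation size is placed in EAX/RAX beforehand):
//   movl $NumBytes, %eax
//   callq __chkstk           ; probes, but leaves RSP unmodified on x86-64
//   subq %rax, %rsp          ; the prologue itself then adjusts RSP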
void X86FrameLowering::emitStackProbeCall(MachineFunction &MF,
                                          MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator MBBI,
                                          const DebugLoc &DL,
                                          bool InProlog) const {
  bool IsLargeCodeModel = MF.getTarget().getCodeModel() == CodeModel::Large;

  // FIXME: Add indirect thunk support and remove this.
  if (Is64Bit && IsLargeCodeModel && STI.useIndirectThunkCalls())
    report_fatal_error("Emitting stack probe calls on 64-bit with the large "
                       "code model and indirect thunks not yet implemented.");

  unsigned CallOp;
  if (Is64Bit)
    CallOp = IsLargeCodeModel ? X86::CALL64r : X86::CALL64pcrel32;
  else
    CallOp = X86::CALLpcrel32;

  StringRef Symbol = STI.getTargetLowering()->getStackProbeSymbolName(MF);

  MachineInstrBuilder CI;
  MachineBasicBlock::iterator ExpansionMBBI = std::prev(MBBI);

  // All current stack probes take AX and SP as input, clobber flags, and
  // preserve all registers. x86_64 probes leave RSP unmodified.
  if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {
    // For the large code model, we have to call through a register. Use R11,
    // as it is scratch in all supported calling conventions.
    BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::R11)
        .addExternalSymbol(MF.createExternalSymbolName(Symbol));
    CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp)).addReg(X86::R11);
  } else {
    CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp))
             .addExternalSymbol(MF.createExternalSymbolName(Symbol));
  }

  unsigned AX = Uses64BitFramePtr ? X86::RAX : X86::EAX;
  unsigned SP = Uses64BitFramePtr ? X86::RSP : X86::ESP;
  CI.addReg(AX, RegState::Implicit)
      .addReg(SP, RegState::Implicit)
      .addReg(AX, RegState::Define | RegState::Implicit)
      .addReg(SP, RegState::Define | RegState::Implicit)
      .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);

  if (STI.isTargetWin64() || !STI.isOSWindows()) {
    // MSVC x32's _chkstk and cygwin/mingw's _alloca adjust %esp themselves.
    // MSVC x64's __chkstk and cygwin/mingw's ___chkstk_ms do not adjust %rsp
    // themselves. They also do not clobber %rax, so we can reuse it when
    // adjusting %rsp.
    // All other platforms do not specify a particular ABI for the stack probe
    // function, so we arbitrarily define it to not adjust %esp/%rsp itself.
    BuildMI(MBB, MBBI, DL, TII.get(getSUBrrOpcode(Uses64BitFramePtr)), SP)
        .addReg(SP)
        .addReg(AX);
  }

  if (InProlog) {
    // Apply the frame setup flag to all inserted instrs.
    for (++ExpansionMBBI; ExpansionMBBI != MBBI; ++ExpansionMBBI)
      ExpansionMBBI->setFlag(MachineInstr::FrameSetup);
  }
}

static unsigned calculateSetFPREG(uint64_t SPAdjust) {
  // Win64 ABI has a less restrictive limitation of 240; 128 works equally
  // well and might require smaller successive adjustments.
  const uint64_t Win64MaxSEHOffset = 128;
  uint64_t SEHFrameOffset = std::min(SPAdjust, Win64MaxSEHOffset);
  // Win64 ABI requires 16-byte alignment for the UWOP_SET_FPREG opcode.
  return SEHFrameOffset & -16;
}
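
// Worked example (hedged): for SPAdjust = 0x58 the clamp gives
// min(0x58, 128) = 0x58, and 0x58 & -16 = 0x50, so the frame pointer is
// established at RSP + 0x50 -- 16-byte aligned and within the Win64 SEH
// limit of 240.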
// If we're forcing a stack realignment we can't rely on just the frame
// info, we need to know the ABI stack alignment as well in case we
// have a call out. Otherwise just make sure we have some alignment - we'll
// go with the minimum SlotSize.
uint64_t
X86FrameLowering::calculateMaxStackAlign(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  Align MaxAlign = MFI.getMaxAlign(); // Desired stack alignment.
  Align StackAlign = getStackAlign();
  if (MF.getFunction().hasFnAttribute("stackrealign")) {
    if (MFI.hasCalls())
      MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
    else if (MaxAlign < SlotSize)
      MaxAlign = Align(SlotSize);
  }
  return MaxAlign.value();
}

void X86FrameLowering::BuildStackAlignAND(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator MBBI,
                                          const DebugLoc &DL, unsigned Reg,
                                          uint64_t MaxAlign) const {
  uint64_t Val = -MaxAlign;
  unsigned AndOp = getANDriOpcode(Uses64BitFramePtr, Val);

  MachineFunction &MF = *MBB.getParent();
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  const X86TargetLowering &TLI = *STI.getTargetLowering();
  const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);
  const bool EmitInlineStackProbe = TLI.hasInlineStackProbe(MF);

  // We want to make sure that, in the worst case, no more than StackProbeSize
  // bytes are left unprobed after the AND. This assumption is used in
  // emitStackProbeInlineGeneric.
  if (Reg == StackPtr && EmitInlineStackProbe && MaxAlign >= StackProbeSize) {
    {
      NumFrameLoopProbe++;
      MachineBasicBlock *entryMBB =
          MF.CreateMachineBasicBlock(MBB.getBasicBlock());
      MachineBasicBlock *headMBB =
          MF.CreateMachineBasicBlock(MBB.getBasicBlock());
      MachineBasicBlock *bodyMBB =
          MF.CreateMachineBasicBlock(MBB.getBasicBlock());
      MachineBasicBlock *footMBB =
          MF.CreateMachineBasicBlock(MBB.getBasicBlock());

      MachineFunction::iterator MBBIter = MBB.getIterator();
      MF.insert(MBBIter, entryMBB);
      MF.insert(MBBIter, headMBB);
      MF.insert(MBBIter, bodyMBB);
      MF.insert(MBBIter, footMBB);
      const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;
      Register FinalStackProbed = Uses64BitFramePtr ? X86::R11
                                  : Is64Bit         ? X86::R11D
                                                    : X86::EAX;

      // Setup entry block
      {
        entryMBB->splice(entryMBB->end(), &MBB, MBB.begin(), MBBI);
        BuildMI(entryMBB, DL, TII.get(TargetOpcode::COPY), FinalStackProbed)
            .addReg(StackPtr)
            .setMIFlag(MachineInstr::FrameSetup);
        MachineInstr *MI =
            BuildMI(entryMBB, DL, TII.get(AndOp), FinalStackProbed)
                .addReg(FinalStackProbed)
                .addImm(Val)
                .setMIFlag(MachineInstr::FrameSetup);

        // The EFLAGS implicit def is dead.
        MI->getOperand(3).setIsDead();

        BuildMI(entryMBB, DL,
                TII.get(Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr))
            .addReg(FinalStackProbed)
            .addReg(StackPtr)
            .setMIFlag(MachineInstr::FrameSetup);
        BuildMI(entryMBB, DL, TII.get(X86::JCC_1))
            .addMBB(&MBB)
            .addImm(X86::COND_E)
            .setMIFlag(MachineInstr::FrameSetup);
        entryMBB->addSuccessor(headMBB);
        entryMBB->addSuccessor(&MBB);
      }

      // Loop entry block
      {
        const unsigned SUBOpc =
            getSUBriOpcode(Uses64BitFramePtr, StackProbeSize);
        BuildMI(headMBB, DL, TII.get(SUBOpc), StackPtr)
            .addReg(StackPtr)
            .addImm(StackProbeSize)
            .setMIFlag(MachineInstr::FrameSetup);

        BuildMI(headMBB, DL,
                TII.get(Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr))
            .addReg(FinalStackProbed)
            .addReg(StackPtr)
            .setMIFlag(MachineInstr::FrameSetup);
        // jump
        BuildMI(headMBB, DL, TII.get(X86::JCC_1))
            .addMBB(footMBB)
            .addImm(X86::COND_B)
            .setMIFlag(MachineInstr::FrameSetup);

        headMBB->addSuccessor(bodyMBB);
        headMBB->addSuccessor(footMBB);
      }

      // setup loop body
      {
        addRegOffset(BuildMI(bodyMBB, DL, TII.get(MovMIOpc))
                         .setMIFlag(MachineInstr::FrameSetup),
                     StackPtr, false, 0)
            .addImm(0)
            .setMIFlag(MachineInstr::FrameSetup);

        const unsigned SUBOpc =
            getSUBriOpcode(Uses64BitFramePtr, StackProbeSize);
        BuildMI(bodyMBB, DL, TII.get(SUBOpc), StackPtr)
            .addReg(StackPtr)
            .addImm(StackProbeSize)
            .setMIFlag(MachineInstr::FrameSetup);

        // cmp with stack pointer bound
        BuildMI(bodyMBB, DL,
                TII.get(Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr))
            .addReg(FinalStackProbed)
            .addReg(StackPtr)
            .setMIFlag(MachineInstr::FrameSetup);

        // jump
        BuildMI(bodyMBB, DL, TII.get(X86::JCC_1))
            .addMBB(bodyMBB)
            .addImm(X86::COND_B)
            .setMIFlag(MachineInstr::FrameSetup);
        bodyMBB->addSuccessor(bodyMBB);
        bodyMBB->addSuccessor(footMBB);
      }

      // setup loop footer
      {
        BuildMI(footMBB, DL, TII.get(TargetOpcode::COPY), StackPtr)
            .addReg(FinalStackProbed)
            .setMIFlag(MachineInstr::FrameSetup);
        addRegOffset(BuildMI(footMBB, DL, TII.get(MovMIOpc))
                         .setMIFlag(MachineInstr::FrameSetup),
                     StackPtr, false, 0)
            .addImm(0)
            .setMIFlag(MachineInstr::FrameSetup);
        footMBB->addSuccessor(&MBB);
      }

      recomputeLiveIns(*headMBB);
      recomputeLiveIns(*bodyMBB);
      recomputeLiveIns(*footMBB);
      recomputeLiveIns(MBB);
    }
  } else {
    MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(AndOp), Reg)
                           .addReg(Reg)
                           .addImm(Val)
                           .setMIFlag(MachineInstr::FrameSetup);

    // The EFLAGS implicit def is dead.
    MI->getOperand(3).setIsDead();
  }
}

bool X86FrameLowering::has128ByteRedZone(const MachineFunction& MF) const {
  // x86-64 (non Win64) has a 128 byte red zone which is guaranteed not to be
  // clobbered by any interrupt handler.
  assert(&STI == &MF.getSubtarget<X86Subtarget>() &&
         "MF used frame lowering for wrong subtarget");
  const Function &Fn = MF.getFunction();
  const bool IsWin64CC = STI.isCallingConvWin64(Fn.getCallingConv());
  return Is64Bit && !IsWin64CC && !Fn.hasFnAttribute(Attribute::NoRedZone);
}

bool X86FrameLowering::isWin64Prologue(const MachineFunction &MF) const {
  return MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
}

bool X86FrameLowering::needsDwarfCFI(const MachineFunction &MF) const {
  return !isWin64Prologue(MF) && MF.needsFrameMoves();
}

/// emitPrologue - Push callee-saved registers onto the stack, which
/// automatically adjusts the stack pointer. Adjust the stack pointer to
/// allocate space for local variables. Also emit labels used by the
/// exception handler to generate the exception handling frames.

/*
  Here's a gist of what gets emitted:

  ; Establish frame pointer, if needed
  [if needs FP]
      push %rbp
      .cfi_def_cfa_offset 16
      .cfi_offset %rbp, -16
      .seh_pushreg %rbp
      mov %rsp, %rbp
      .cfi_def_cfa_register %rbp

  ; Spill general-purpose registers
  [for all callee-saved GPRs]
      pushq %<reg>
      [if not needs FP]
        .cfi_def_cfa_offset (offset from RETADDR)
      .seh_pushreg %<reg>

  ; If the required stack alignment > default stack alignment
  ; rsp needs to be re-aligned. This creates a "re-alignment gap"
  ; of unknown size in the stack frame.
  [if stack needs re-alignment]
      and $MASK, %rsp

  ; Allocate space for locals
  [if target is Windows and allocated space > 4096 bytes]
      ; Windows needs special care for allocations larger
      ; than one page.
      mov $NNN, %rax
      call ___chkstk_ms/___chkstk
      sub %rax, %rsp
  [else]
      sub $NNN, %rsp

  [if needs FP]
      .seh_stackalloc (size of XMM spill slots)
      .seh_setframe %rbp, SEHFrameOffset ; = size of all spill slots
  [else]
      .seh_stackalloc NNN

  ; Spill XMMs
  ; Note that while only Windows 64 ABI specifies XMMs as callee-preserved,
  ; they may get spilled on any platform, if the current function
  ; calls @llvm.eh.unwind.init
  [if needs FP]
      [for all callee-saved XMM registers]
          movaps %<xmm reg>, -MMM(%rbp)
      [for all callee-saved XMM registers]
          .seh_savexmm %<xmm reg>, (-MMM + SEHFrameOffset)
          ; i.e. the offset relative to (%rbp - SEHFrameOffset)
  [else]
      [for all callee-saved XMM registers]
          movaps %<xmm reg>, KKK(%rsp)
      [for all callee-saved XMM registers]
          .seh_savexmm %<xmm reg>, KKK

  .seh_endprologue

  [if needs base pointer]
      mov %rsp, %rbx
      [if needs to restore base pointer]
          mov %rsp, -MMM(%rbp)

  ; Emit CFI info
  [if needs FP]
      [for all callee-saved registers]
          .cfi_offset %<reg>, (offset from %rbp)
  [else]
      .cfi_def_cfa_offset (offset from RETADDR)
      [for all callee-saved registers]
          .cfi_offset %<reg>, (offset from %rsp)

  Notes:
  - .seh directives are emitted only for Windows 64 ABI
  - .cv_fpo directives are emitted on win32 when emitting CodeView
  - .cfi directives are emitted for all other ABIs
  - for 32-bit code, substitute %e?? registers for %r??
*/

void X86FrameLowering::emitPrologue(MachineFunction &MF,
                                    MachineBasicBlock &MBB) const {
  assert(&STI == &MF.getSubtarget<X86Subtarget>() &&
         "MF used frame lowering for wrong subtarget");
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const Function &Fn = MF.getFunction();
  MachineModuleInfo &MMI = MF.getMMI();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  uint64_t MaxAlign = calculateMaxStackAlign(MF); // Desired stack alignment.
  uint64_t StackSize = MFI.getStackSize(); // Number of bytes to allocate.
  bool IsFunclet = MBB.isEHFuncletEntry();
  EHPersonality Personality = EHPersonality::Unknown;
  if (Fn.hasPersonalityFn())
    Personality = classifyEHPersonality(Fn.getPersonalityFn());
  bool FnHasClrFunclet =
      MF.hasEHFunclets() && Personality == EHPersonality::CoreCLR;
  bool IsClrFunclet = IsFunclet && FnHasClrFunclet;
  bool HasFP = hasFP(MF);
  bool IsWin64Prologue = isWin64Prologue(MF);
  bool NeedsWin64CFI = IsWin64Prologue && Fn.needsUnwindTableEntry();
  // FIXME: Emit FPO data for EH funclets.
  bool NeedsWinFPO =
      !IsFunclet && STI.isTargetWin32() && MMI.getModule()->getCodeViewFlag();
  bool NeedsWinCFI = NeedsWin64CFI || NeedsWinFPO;
  bool NeedsDwarfCFI = needsDwarfCFI(MF);
  Register FramePtr = TRI->getFrameRegister(MF);
  const Register MachineFramePtr =
      STI.isTarget64BitILP32()
          ? Register(getX86SubSuperRegister(FramePtr, 64)) : FramePtr;
  Register BasePtr = TRI->getBaseRegister();
  bool HasWinCFI = false;

  // Debug location must be unknown since the first debug location is used
  // to determine the end of the prologue.
  DebugLoc DL;

  // Space reserved for stack-based arguments when making an (ABI-guaranteed)
  // tail call.
  unsigned TailCallArgReserveSize = -X86FI->getTCReturnAddrDelta();
  if (TailCallArgReserveSize && IsWin64Prologue)
    report_fatal_error("Can't handle guaranteed tail call under win64 yet");

  const bool EmitStackProbeCall =
      STI.getTargetLowering()->hasStackProbeSymbol(MF);
  unsigned StackProbeSize = STI.getTargetLowering()->getStackProbeSize(MF);

  if (HasFP && X86FI->hasSwiftAsyncContext()) {
    switch (MF.getTarget().Options.SwiftAsyncFramePointer) {
    case SwiftAsyncFramePointerMode::DeploymentBased:
      if (STI.swiftAsyncContextIsDynamicallySet()) {
        // The special symbol below is absolute and has a *value* suitable to
        // be combined with the frame pointer directly.
        BuildMI(MBB, MBBI, DL, TII.get(X86::OR64rm), MachineFramePtr)
            .addUse(MachineFramePtr)
            .addUse(X86::RIP)
            .addImm(1)
            .addUse(X86::NoRegister)
            .addExternalSymbol("swift_async_extendedFramePointerFlags",
                               X86II::MO_GOTPCREL)
            .addUse(X86::NoRegister);
        break;
      }
      LLVM_FALLTHROUGH;

    case SwiftAsyncFramePointerMode::Always:
      BuildMI(MBB, MBBI, DL, TII.get(X86::BTS64ri8), MachineFramePtr)
          .addUse(MachineFramePtr)
          .addImm(60)
          .setMIFlag(MachineInstr::FrameSetup);
      break;

    case SwiftAsyncFramePointerMode::Never:
      break;
    }
  }

  // Re-align the stack on 64-bit if the x86-interrupt calling convention is
  // used and an error code was pushed, since the x86-64 ABI requires a
  // 16-byte stack alignment.
  if (Fn.getCallingConv() == CallingConv::X86_INTR && Is64Bit &&
      Fn.arg_size() == 2) {
    StackSize += 8;
    MFI.setStackSize(StackSize);
    emitSPUpdate(MBB, MBBI, DL, -8, /*InEpilogue=*/false);
  }
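
  // Worked example (hedged): on SysV x86-64, a leaf function with 40 bytes
  // of locals can address them at -40(%rsp)..-1(%rsp) inside the 128-byte
  // red zone, so the check below shrinks StackSize to 0 and the prologue
  // emits no stack-pointer adjustment at all.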
  // If this is x86-64, the Red Zone is not disabled, and the function is a
  // leaf that uses no more than 128 bytes of stack space, has no frame
  // pointer, no calls, and no dynamic alloca, then we do not need to adjust
  // the stack pointer (we fit in the Red Zone). We also check that we don't
  // push and pop from the stack.
  if (has128ByteRedZone(MF) && !TRI->hasStackRealignment(MF) &&
      !MFI.hasVarSizedObjects() &&             // No dynamic alloca.
      !MFI.adjustsStack() &&                   // No calls.
      !EmitStackProbeCall &&                   // No stack probes.
      !MFI.hasCopyImplyingStackAdjustment() && // Don't push and pop.
      !MF.shouldSplitStack()) {                // Regular stack
    uint64_t MinSize =
        X86FI->getCalleeSavedFrameSize() - X86FI->getTCReturnAddrDelta();
    if (HasFP) MinSize += SlotSize;
    X86FI->setUsesRedZone(MinSize > 0 || StackSize > 0);
    StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
    MFI.setStackSize(StackSize);
  }

  // Insert stack pointer adjustment for later moving of return addr. Only
  // applies to tail call optimized functions where the callee argument stack
  // size is bigger than the caller's.
  if (TailCallArgReserveSize != 0) {
    BuildStackAdjustment(MBB, MBBI, DL, -(int)TailCallArgReserveSize,
                         /*InEpilogue=*/false)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // Mapping for machine moves:
  //
  //   DST: VirtualFP AND
  //        SRC: VirtualFP              => DW_CFA_def_cfa_offset
  //        ELSE                        => DW_CFA_def_cfa
  //
  //   SRC: VirtualFP AND
  //        DST: Register               => DW_CFA_def_cfa_register
  //
  //   ELSE
  //        OFFSET < 0                  => DW_CFA_offset_extended_sf
  //        REG < 64                    => DW_CFA_offset + Reg
  //        ELSE                        => DW_CFA_offset_extended

  uint64_t NumBytes = 0;
  int stackGrowth = -SlotSize;

  // Find the funclet establisher parameter.
  Register Establisher = X86::NoRegister;
  if (IsClrFunclet)
    Establisher = Uses64BitFramePtr ? X86::RCX : X86::ECX;
  else if (IsFunclet)
    Establisher = Uses64BitFramePtr ? X86::RDX : X86::EDX;

  if (IsWin64Prologue && IsFunclet && !IsClrFunclet) {
    // Immediately spill establisher into the home slot.
    // The runtime cares about this.
    // MOV64mr %rdx, 16(%rsp)
    unsigned MOVmr = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
    addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MOVmr)), StackPtr, true, 16)
        .addReg(Establisher)
        .setMIFlag(MachineInstr::FrameSetup);
    MBB.addLiveIn(Establisher);
  }

  if (HasFP) {
    assert(MF.getRegInfo().isReserved(MachineFramePtr) && "FP reserved");

    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    // If required, include space for an extra hidden slot for stashing the
    // base pointer.
    if (X86FI->getRestoreBasePointer())
      FrameSize += SlotSize;

    NumBytes = FrameSize -
               (X86FI->getCalleeSavedFrameSize() + TailCallArgReserveSize);

    // Callee-saved registers are pushed on stack before the stack is
    // realigned.
    if (TRI->hasStackRealignment(MF) && !IsWin64Prologue)
      NumBytes = alignTo(NumBytes, MaxAlign);

    // Save EBP/RBP into the appropriate stack slot.
    BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
        .addReg(MachineFramePtr, RegState::Kill)
        .setMIFlag(MachineInstr::FrameSetup);

    if (NeedsDwarfCFI) {
      // Mark the place where EBP/RBP was saved.
      // Define the current CFA rule to use the provided offset.
      assert(StackSize);
      BuildCFI(MBB, MBBI, DL,
               MCCFIInstruction::cfiDefCfaOffset(nullptr, -2 * stackGrowth));

      // Change the rule for the FramePtr to be an "offset" rule.
      unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
      BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createOffset(
                                  nullptr, DwarfFramePtr, 2 * stackGrowth));
    }
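
    // With SlotSize == 8 (hedged x86-64 illustration), stackGrowth is -8, so
    // the two directives above come out as ".cfi_def_cfa_offset 16" (return
    // address plus saved FP) and ".cfi_offset %rbp, -16".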
    if (NeedsWinCFI) {
      HasWinCFI = true;
      BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
          .addImm(FramePtr)
          .setMIFlag(MachineInstr::FrameSetup);
    }

    if (!IsFunclet) {
      if (X86FI->hasSwiftAsyncContext()) {
        const auto &Attrs = MF.getFunction().getAttributes();

        // Before we update the live frame pointer we have to ensure there's
        // a valid (or null) asynchronous context in its slot just before FP
        // in the frame record, so store it now.
        if (Attrs.hasAttrSomewhere(Attribute::SwiftAsync)) {
          // We have an initial context in r14, store it just before the
          // frame pointer.
          MBB.addLiveIn(X86::R14);
          BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64r))
              .addReg(X86::R14)
              .setMIFlag(MachineInstr::FrameSetup);
        } else {
          // No initial context, store null so that there's no pointer that
          // could be misused.
          BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64i8))
              .addImm(0)
              .setMIFlag(MachineInstr::FrameSetup);
        }

        if (NeedsWinCFI) {
          HasWinCFI = true;
          BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
              .addImm(X86::R14)
              .setMIFlag(MachineInstr::FrameSetup);
        }

        BuildMI(MBB, MBBI, DL, TII.get(X86::LEA64r), FramePtr)
            .addUse(X86::RSP)
            .addImm(1)
            .addUse(X86::NoRegister)
            .addImm(8)
            .addUse(X86::NoRegister)
            .setMIFlag(MachineInstr::FrameSetup);
        BuildMI(MBB, MBBI, DL, TII.get(X86::SUB64ri8), X86::RSP)
            .addUse(X86::RSP)
            .addImm(8)
            .setMIFlag(MachineInstr::FrameSetup);
      }

      if (!IsWin64Prologue && !IsFunclet) {
        // Update EBP with the new base value.
        if (!X86FI->hasSwiftAsyncContext())
          BuildMI(MBB, MBBI, DL,
                  TII.get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr),
                  FramePtr)
              .addReg(StackPtr)
              .setMIFlag(MachineInstr::FrameSetup);

        if (NeedsDwarfCFI) {
          // Mark effective beginning of when frame pointer becomes valid.
          // Define the current CFA to use the EBP/RBP register.
          unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
          BuildCFI(
              MBB, MBBI, DL,
              MCCFIInstruction::createDefCfaRegister(nullptr, DwarfFramePtr));
        }

        if (NeedsWinFPO) {
          // .cv_fpo_setframe $FramePtr
          HasWinCFI = true;
          BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SetFrame))
              .addImm(FramePtr)
              .addImm(0)
              .setMIFlag(MachineInstr::FrameSetup);
        }
      }
    }
  } else {
    assert(!IsFunclet && "funclets without FPs not yet implemented");
    NumBytes = StackSize -
               (X86FI->getCalleeSavedFrameSize() + TailCallArgReserveSize);
  }

  // Update the offset adjustment, which is mainly used by codeview to
  // translate from ESP to VFRAME relative local variable offsets.
  if (!IsFunclet) {
    if (HasFP && TRI->hasStackRealignment(MF))
      MFI.setOffsetAdjustment(-NumBytes);
    else
      MFI.setOffsetAdjustment(-StackSize);
  }

  // For EH funclets, only allocate enough space for outgoing calls. Save the
  // NumBytes value that we would've used for the parent frame.
  unsigned ParentFrameNumBytes = NumBytes;
  if (IsFunclet)
    NumBytes = getWinEHFuncletFrameSize(MF);
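
  // Illustrative (hedged): in a frameless x86-64 function that saves %rbx
  // and %r14, the skipping loop below walks past
  //   pushq %rbx    ; followed by .cfi_def_cfa_offset 16
  //   pushq %r14    ; followed by .cfi_def_cfa_offset 24
  // since the return address already occupies the first 8 bytes below the
  // CFA before any callee-saved push.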
  bool PushedRegs = false;
  int StackOffset = 2 * stackGrowth;

  while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup) &&
         (MBBI->getOpcode() == X86::PUSH32r ||
          MBBI->getOpcode() == X86::PUSH64r)) {
    PushedRegs = true;
    Register Reg = MBBI->getOperand(0).getReg();
    ++MBBI;

    if (!HasFP && NeedsDwarfCFI) {
      // Mark callee-saved push instruction.
      // Define the current CFA rule to use the provided offset.
      assert(StackSize);
      BuildCFI(MBB, MBBI, DL,
               MCCFIInstruction::cfiDefCfaOffset(nullptr, -StackOffset));
      StackOffset += stackGrowth;
    }

    if (NeedsWinCFI) {
      HasWinCFI = true;
      BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
          .addImm(Reg)
          .setMIFlag(MachineInstr::FrameSetup);
    }
  }

  // Realign stack after we pushed callee-saved registers (so that we'll be
  // able to calculate their offsets from the frame pointer).
  // Don't do this for Win64; it needs to realign the stack after the prologue.
  if (!IsWin64Prologue && !IsFunclet && TRI->hasStackRealignment(MF)) {
    assert(HasFP && "There should be a frame pointer if stack is realigned.");
    BuildStackAlignAND(MBB, MBBI, DL, StackPtr, MaxAlign);

    if (NeedsWinCFI) {
      HasWinCFI = true;
      BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlign))
          .addImm(MaxAlign)
          .setMIFlag(MachineInstr::FrameSetup);
    }
  }

  // If there is a SUB32ri of ESP immediately before this instruction, merge
  // the two. This can be the case when tail call elimination is enabled and
  // the callee has more arguments than the caller.
  NumBytes -= mergeSPUpdates(MBB, MBBI, true);

  // Adjust stack pointer: ESP -= numbytes.

  // Windows and cygwin/mingw require a prologue helper routine when allocating
  // more than 4K bytes on the stack.  Windows uses __chkstk and cygwin/mingw
  // uses __alloca.  __alloca and the 32-bit version of __chkstk will probe the
  // stack and adjust the stack pointer in one go.  The 64-bit version of
  // __chkstk is only responsible for probing the stack.  The 64-bit prologue is
  // responsible for adjusting the stack pointer.  Touching the stack at 4K
  // increments is necessary to ensure that the guard pages used by the OS
  // virtual memory manager are allocated in correct sequence.
  uint64_t AlignedNumBytes = NumBytes;
  if (IsWin64Prologue && !IsFunclet && TRI->hasStackRealignment(MF))
    AlignedNumBytes = alignTo(AlignedNumBytes, MaxAlign);
  if (AlignedNumBytes >= StackProbeSize && EmitStackProbeCall) {
    assert(!X86FI->getUsesRedZone() &&
           "The Red Zone is not accounted for in stack probes");

    // Check whether EAX is livein for this block.
    bool isEAXAlive = isEAXLiveIn(MBB);

    if (isEAXAlive) {
      if (Is64Bit) {
        // Save RAX
        BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64r))
            .addReg(X86::RAX, RegState::Kill)
            .setMIFlag(MachineInstr::FrameSetup);
      } else {
        // Save EAX
        BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
            .addReg(X86::EAX, RegState::Kill)
            .setMIFlag(MachineInstr::FrameSetup);
      }
    }

    if (Is64Bit) {
      // Handle the 64-bit Windows ABI case where we need to call __chkstk.
      // Function prologue is responsible for adjusting the stack pointer.
      int64_t Alloc = isEAXAlive ? NumBytes - 8 : NumBytes;
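      // Note: the probe helper expects the allocation size in EAX/RAX. When
      // RAX was live and pushed above, that push already moved RSP by
      // SlotSize bytes, which is why only NumBytes - 8 remain to be
      // allocated here.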
      if (isUInt<32>(Alloc)) {
        BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
            .addImm(Alloc)
            .setMIFlag(MachineInstr::FrameSetup);
      } else if (isInt<32>(Alloc)) {
        BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri32), X86::RAX)
            .addImm(Alloc)
            .setMIFlag(MachineInstr::FrameSetup);
      } else {
        BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::RAX)
            .addImm(Alloc)
            .setMIFlag(MachineInstr::FrameSetup);
      }
    } else {
      // When EAX is live, allocate only NumBytes-4 bytes on the stack; the
      // 4 bytes already pushed to save EAX complete the allocation.
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
          .addImm(isEAXAlive ? NumBytes - 4 : NumBytes)
          .setMIFlag(MachineInstr::FrameSetup);
    }

    // Call __chkstk, __chkstk_ms, or __alloca.
    emitStackProbe(MF, MBB, MBBI, DL, true);

    if (isEAXAlive) {
      // Restore RAX/EAX
      MachineInstr *MI;
      if (Is64Bit)
        MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV64rm), X86::RAX),
                          StackPtr, false, NumBytes - 8);
      else
        MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm), X86::EAX),
                          StackPtr, false, NumBytes - 4);
      MI->setFlag(MachineInstr::FrameSetup);
      MBB.insert(MBBI, MI);
    }
  } else if (NumBytes) {
    emitSPUpdate(MBB, MBBI, DL, -(int64_t)NumBytes, /*InEpilogue=*/false);
  }

  if (NeedsWinCFI && NumBytes) {
    HasWinCFI = true;
    BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlloc))
        .addImm(NumBytes)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  int SEHFrameOffset = 0;
  unsigned SPOrEstablisher;
  if (IsFunclet) {
    if (IsClrFunclet) {
      // The establisher parameter passed to a CLR funclet is actually a pointer
      // to the (mostly empty) frame of its nearest enclosing funclet; we have
      // to find the root function establisher frame by loading the PSPSym from
      // the intermediate frame.
      unsigned PSPSlotOffset = getPSPSlotOffsetFromSP(MF);
      MachinePointerInfo NoInfo;
      MBB.addLiveIn(Establisher);
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rm), Establisher),
                   Establisher, false, PSPSlotOffset)
          .addMemOperand(MF.getMachineMemOperand(
              NoInfo, MachineMemOperand::MOLoad, SlotSize, Align(SlotSize)));
      // Save the root establisher back into the current funclet's (mostly
      // empty) frame, in case a sub-funclet or the GC needs it.
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mr)), StackPtr,
                   false, PSPSlotOffset)
          .addReg(Establisher)
          .addMemOperand(MF.getMachineMemOperand(
              NoInfo,
              MachineMemOperand::MOStore | MachineMemOperand::MOVolatile,
              SlotSize, Align(SlotSize)));
    }
    SPOrEstablisher = Establisher;
  } else {
    SPOrEstablisher = StackPtr;
  }

  if (IsWin64Prologue && HasFP) {
    // Set RBP to a small fixed offset from RSP. In the funclet case, we base
    // this calculation on the incoming establisher, which holds the value of
    // RSP from the parent frame at the end of the prologue.
    SEHFrameOffset = calculateSetFPREG(ParentFrameNumBytes);
    if (SEHFrameOffset)
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::LEA64r), FramePtr),
                   SPOrEstablisher, false, SEHFrameOffset);
    else
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rr), FramePtr)
          .addReg(SPOrEstablisher);

    // If this is not a funclet, emit the CFI describing our frame pointer.
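    // E.g. with SEHFrameOffset == 128 (a hypothetical value) the instruction
    // built below prints as the assembler directive:
    //
    //   .seh_setframe %rbp, 128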
    if (NeedsWinCFI && !IsFunclet) {
      assert(!NeedsWinFPO && "this setframe incompatible with FPO data");
      HasWinCFI = true;
      BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SetFrame))
          .addImm(FramePtr)
          .addImm(SEHFrameOffset)
          .setMIFlag(MachineInstr::FrameSetup);
      if (isAsynchronousEHPersonality(Personality))
        MF.getWinEHFuncInfo()->SEHSetFrameOffset = SEHFrameOffset;
    }
  } else if (IsFunclet && STI.is32Bit()) {
    // Reset EBP / ESI to something good for funclets.
    MBBI = restoreWin32EHStackPointers(MBB, MBBI, DL);
    // If we're a catch funclet, we can be returned to via catchret. Save ESP
    // into the registration node so that the runtime will restore it for us.
    if (!MBB.isCleanupFuncletEntry()) {
      assert(Personality == EHPersonality::MSVC_CXX);
      Register FrameReg;
      int FI = MF.getWinEHFuncInfo()->EHRegNodeFrameIndex;
      int64_t EHRegOffset = getFrameIndexReference(MF, FI, FrameReg).getFixed();
      // ESP is the first field, so no extra displacement is needed.
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32mr)), FrameReg,
                   false, EHRegOffset)
          .addReg(X86::ESP);
    }
  }

  while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup)) {
    const MachineInstr &FrameInstr = *MBBI;
    ++MBBI;

    if (NeedsWinCFI) {
      int FI;
      if (unsigned Reg = TII.isStoreToStackSlot(FrameInstr, FI)) {
        if (X86::FR64RegClass.contains(Reg)) {
          int Offset;
          Register IgnoredFrameReg;
          if (IsWin64Prologue && IsFunclet)
            Offset = getWin64EHFrameIndexRef(MF, FI, IgnoredFrameReg);
          else
            Offset =
                getFrameIndexReference(MF, FI, IgnoredFrameReg).getFixed() +
                SEHFrameOffset;

          HasWinCFI = true;
          assert(!NeedsWinFPO && "SEH_SaveXMM incompatible with FPO data");
          BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SaveXMM))
              .addImm(Reg)
              .addImm(Offset)
              .setMIFlag(MachineInstr::FrameSetup);
        }
      }
    }
  }

  if (NeedsWinCFI && HasWinCFI)
    BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_EndPrologue))
        .setMIFlag(MachineInstr::FrameSetup);

  if (FnHasClrFunclet && !IsFunclet) {
    // Save the so-called Initial-SP (i.e. the value of the stack pointer
    // immediately after the prolog) into the PSPSlot so that funclets
    // and the GC can recover it.
    unsigned PSPSlotOffset = getPSPSlotOffsetFromSP(MF);
    auto PSPInfo = MachinePointerInfo::getFixedStack(
        MF, MF.getWinEHFuncInfo()->PSPSymFrameIdx);
    addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mr)), StackPtr, false,
                 PSPSlotOffset)
        .addReg(StackPtr)
        .addMemOperand(MF.getMachineMemOperand(
            PSPInfo, MachineMemOperand::MOStore | MachineMemOperand::MOVolatile,
            SlotSize, Align(SlotSize)));
  }

  // Realign stack after we spilled callee-saved registers (so that we'll be
  // able to calculate their offsets from the frame pointer).
  // Win64 requires aligning the stack after the prologue.
  if (IsWin64Prologue && TRI->hasStackRealignment(MF)) {
    assert(HasFP && "There should be a frame pointer if stack is realigned.");
    BuildStackAlignAND(MBB, MBBI, DL, SPOrEstablisher, MaxAlign);
  }

  // We already dealt with stack realignment and funclets above.
  if (IsFunclet && STI.is32Bit())
    return;

  // If we need a base pointer, set it up here. It's whatever the value
  // of the stack pointer is at this point. Any variable size objects
  // will be allocated after this, so we can still use the base pointer
  // to reference locals.
  if (TRI->hasBasePointer(MF)) {
    // Update the base pointer with the current stack pointer.
    unsigned Opc = Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr;
    BuildMI(MBB, MBBI, DL, TII.get(Opc), BasePtr)
        .addReg(SPOrEstablisher)
        .setMIFlag(MachineInstr::FrameSetup);
    if (X86FI->getRestoreBasePointer()) {
      // Stash value of base pointer.  Saving RSP instead of EBP shortens
      // the dependence chain. Used by SjLj EH.
      unsigned Opm = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)), FramePtr, true,
                   X86FI->getRestoreBasePointerOffset())
          .addReg(SPOrEstablisher)
          .setMIFlag(MachineInstr::FrameSetup);
    }

    if (X86FI->getHasSEHFramePtrSave() && !IsFunclet) {
      // Stash the value of the frame pointer relative to the base pointer for
      // Win32 EH, which does the inverse of the above: it recovers the frame
      // pointer from the base pointer rather than the other way around.
      unsigned Opm = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
      Register UsedReg;
      int Offset =
          getFrameIndexReference(MF, X86FI->getSEHFramePtrSaveIndex(), UsedReg)
              .getFixed();
      assert(UsedReg == BasePtr);
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)), UsedReg, true, Offset)
          .addReg(FramePtr)
          .setMIFlag(MachineInstr::FrameSetup);
    }
  }

  if (((!HasFP && NumBytes) || PushedRegs) && NeedsDwarfCFI) {
    // Mark end of stack pointer adjustment.
    if (!HasFP && NumBytes) {
      // Define the current CFA rule to use the provided offset.
      assert(StackSize);
      BuildCFI(
          MBB, MBBI, DL,
          MCCFIInstruction::cfiDefCfaOffset(nullptr, StackSize - stackGrowth));
    }

    // Emit DWARF info specifying the offsets of the callee-saved registers.
    emitCalleeSavedFrameMoves(MBB, MBBI, DL, true);
  }

  // An x86 interrupt handling function cannot assume anything about the
  // direction flag (DF in the EFLAGS register). Clear this flag by creating
  // a "cld" instruction in the prologue of each interrupt handler function.
  //
  // FIXME: Create the "cld" instruction only in these cases:
  // 1. The interrupt handling function uses any of the "rep" instructions.
  // 2. The interrupt handling function calls another function.
  //
  if (Fn.getCallingConv() == CallingConv::X86_INTR)
    BuildMI(MBB, MBBI, DL, TII.get(X86::CLD))
        .setMIFlag(MachineInstr::FrameSetup);

  // At this point we know if the function has WinCFI or not.
  MF.setHasWinCFI(HasWinCFI);
}

bool X86FrameLowering::canUseLEAForSPInEpilogue(
    const MachineFunction &MF) const {
  // We can't use LEA instructions for adjusting the stack pointer if we don't
  // have a frame pointer in the Win64 ABI.  Only ADD instructions may be used
  // to deallocate the stack.
  // This means that we can use LEA for SP in two situations:
  // 1. We *aren't* using the Win64 ABI which means we are free to use LEA.
  // 2. We *have* a frame pointer which means we are permitted to use LEA.
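  // For example (offsets hypothetical), a frame-pointer epilogue may restore
  // RSP with
  //
  //   leaq 16(%rbp), %rsp
  //
  // while a Win64 frame without a frame pointer must stick to
  //
  //   addq $24, %rsp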
  return !MF.getTarget().getMCAsmInfo()->usesWindowsCFI() || hasFP(MF);
}

static bool isFuncletReturnInstr(MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case X86::CATCHRET:
  case X86::CLEANUPRET:
    return true;
  default:
    return false;
  }
  llvm_unreachable("impossible");
}

// CLR funclets use a special "Previous Stack Pointer Symbol" slot on the
// stack. It holds a pointer to the bottom of the root function frame.  The
// establisher frame pointer passed to a nested funclet may point to the
// (mostly empty) frame of its parent funclet, but it will need to find
// the frame of the root function to access locals.  To facilitate this,
// every funclet copies the pointer to the bottom of the root function
// frame into a PSPSym slot in its own (mostly empty) stack frame.  Using the
// same offset for the PSPSym in the root function frame that's used in the
// funclets' frames allows each funclet to dynamically accept any ancestor
// frame as its establisher argument (the runtime doesn't guarantee the
// immediate parent for some reason lost to history), and also allows the GC,
// which uses the PSPSym for some bookkeeping, to find it in any funclet's
// frame with only a single offset reported for the entire method.
unsigned
X86FrameLowering::getPSPSlotOffsetFromSP(const MachineFunction &MF) const {
  const WinEHFuncInfo &Info = *MF.getWinEHFuncInfo();
  Register SPReg;
  int Offset = getFrameIndexReferencePreferSP(MF, Info.PSPSymFrameIdx, SPReg,
                                              /*IgnoreSPUpdates*/ true)
                   .getFixed();
  assert(Offset >= 0 && SPReg == TRI->getStackRegister());
  return static_cast<unsigned>(Offset);
}

unsigned
X86FrameLowering::getWinEHFuncletFrameSize(const MachineFunction &MF) const {
  const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  // This is the size of the pushed CSRs.
  unsigned CSSize = X86FI->getCalleeSavedFrameSize();
  // This is the size of the callee-saved XMMs.
  const auto &WinEHXMMSlotInfo = X86FI->getWinEHXMMSlotInfo();
  unsigned XMMSize =
      WinEHXMMSlotInfo.size() * TRI->getSpillSize(X86::VR128RegClass);
  // This is the amount of stack a funclet needs to allocate.
  unsigned UsedSize;
  EHPersonality Personality =
      classifyEHPersonality(MF.getFunction().getPersonalityFn());
  if (Personality == EHPersonality::CoreCLR) {
    // CLR funclets need to hold enough space to include the PSPSym, at the
    // same offset from the stack pointer (immediately after the prolog) at
    // which it resides in the main function.
    UsedSize = getPSPSlotOffsetFromSP(MF) + SlotSize;
  } else {
    // Other funclets just need enough stack for outgoing call arguments.
    UsedSize = MF.getFrameInfo().getMaxCallFrameSize();
  }
  // RBP is not included in the callee saved register block. After pushing RBP,
  // everything is 16 byte aligned. Everything we allocate before an outgoing
  // call must also be 16 byte aligned.
  unsigned FrameSizeMinusRBP = alignTo(CSSize + UsedSize, getStackAlign());
  // Subtract out the size of the callee saved registers. This is how much
  // stack each funclet will allocate.
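  // Worked example with hypothetical numbers: for CSSize == 24, UsedSize == 32
  // and XMMSize == 0, alignTo(24 + 32, 16) == 64, so the funclet allocates
  // 64 - 24 == 40 bytes below its pushed CSRs.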
  return FrameSizeMinusRBP + XMMSize - CSSize;
}

static bool isTailCallOpcode(unsigned Opc) {
  return Opc == X86::TCRETURNri || Opc == X86::TCRETURNdi ||
         Opc == X86::TCRETURNmi || Opc == X86::TCRETURNri64 ||
         Opc == X86::TCRETURNdi64 || Opc == X86::TCRETURNmi64;
}

void X86FrameLowering::emitEpilogue(MachineFunction &MF,
                                    MachineBasicBlock &MBB) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  MachineBasicBlock::iterator Terminator = MBB.getFirstTerminator();
  MachineBasicBlock::iterator MBBI = Terminator;
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  // standard x86_64 and NaCl use 64-bit frame/stack pointers, x32 - 32-bit.
  const bool Is64BitILP32 = STI.isTarget64BitILP32();
  Register FramePtr = TRI->getFrameRegister(MF);
  Register MachineFramePtr =
      Is64BitILP32 ? Register(getX86SubSuperRegister(FramePtr, 64)) : FramePtr;

  bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
  bool NeedsWin64CFI =
      IsWin64Prologue && MF.getFunction().needsUnwindTableEntry();
  bool IsFunclet = MBBI == MBB.end() ? false : isFuncletReturnInstr(*MBBI);

  // Get the number of bytes to allocate from the FrameInfo.
  uint64_t StackSize = MFI.getStackSize();
  uint64_t MaxAlign = calculateMaxStackAlign(MF);
  unsigned CSSize = X86FI->getCalleeSavedFrameSize();
  unsigned TailCallArgReserveSize = -X86FI->getTCReturnAddrDelta();
  bool HasFP = hasFP(MF);
  uint64_t NumBytes = 0;

  bool NeedsDwarfCFI = (!MF.getTarget().getTargetTriple().isOSDarwin() &&
                        !MF.getTarget().getTargetTriple().isOSWindows()) &&
                       MF.needsFrameMoves();

  if (IsFunclet) {
    assert(HasFP && "EH funclets without FP not yet implemented");
    NumBytes = getWinEHFuncletFrameSize(MF);
  } else if (HasFP) {
    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    NumBytes = FrameSize - CSSize - TailCallArgReserveSize;

    // Callee-saved registers were pushed on the stack before the stack was
    // realigned.
    if (TRI->hasStackRealignment(MF) && !IsWin64Prologue)
      NumBytes = alignTo(FrameSize, MaxAlign);
  } else {
    NumBytes = StackSize - CSSize - TailCallArgReserveSize;
  }
  uint64_t SEHStackAllocAmt = NumBytes;

  // AfterPop is the position to insert .cfi_restore.
  MachineBasicBlock::iterator AfterPop = MBBI;
  if (HasFP) {
    if (X86FI->hasSwiftAsyncContext()) {
      // Discard the context.
      int Offset = 16 + mergeSPUpdates(MBB, MBBI, true);
      emitSPUpdate(MBB, MBBI, DL, Offset, /*InEpilogue*/ true);
    }
    // Pop EBP.
    BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::POP64r : X86::POP32r),
            MachineFramePtr)
        .setMIFlag(MachineInstr::FrameDestroy);

    // We need to reset FP to its untagged state on return. Bit 60 is currently
    // used to show the presence of an extended frame.
    if (X86FI->hasSwiftAsyncContext()) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::BTR64ri8), MachineFramePtr)
          .addUse(MachineFramePtr)
          .addImm(60)
          .setMIFlag(MachineInstr::FrameDestroy);
    }

    if (NeedsDwarfCFI) {
      unsigned DwarfStackPtr =
          TRI->getDwarfRegNum(Is64Bit ? X86::RSP : X86::ESP, true);
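      // After the frame-pointer pop above, the CFA is computed from the stack
      // pointer again; on x86-64 the directive built below is, e.g.:
      //
      //   .cfi_def_cfa %rsp, 8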
      BuildCFI(MBB, MBBI, DL,
               MCCFIInstruction::cfiDefCfa(nullptr, DwarfStackPtr, SlotSize));
      if (!MBB.succ_empty() && !MBB.isReturnBlock()) {
        unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
        BuildCFI(MBB, AfterPop, DL,
                 MCCFIInstruction::createRestore(nullptr, DwarfFramePtr));
        --MBBI;
        --AfterPop;
      }
      --MBBI;
    }
  }

  MachineBasicBlock::iterator FirstCSPop = MBBI;
  // Skip the callee-saved pop instructions.
  while (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PI = std::prev(MBBI);
    unsigned Opc = PI->getOpcode();

    if (Opc != X86::DBG_VALUE && !PI->isTerminator()) {
      if ((Opc != X86::POP32r || !PI->getFlag(MachineInstr::FrameDestroy)) &&
          (Opc != X86::POP64r || !PI->getFlag(MachineInstr::FrameDestroy)) &&
          (Opc != X86::BTR64ri8 || !PI->getFlag(MachineInstr::FrameDestroy)) &&
          (Opc != X86::ADD64ri8 || !PI->getFlag(MachineInstr::FrameDestroy)))
        break;
      FirstCSPop = PI;
    }

    --MBBI;
  }
  MBBI = FirstCSPop;

  if (IsFunclet && Terminator->getOpcode() == X86::CATCHRET)
    emitCatchRetReturnValue(MBB, FirstCSPop, &*Terminator);

  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  // If there is an ADD32ri or SUB32ri of ESP immediately before this
  // instruction, merge the two instructions.
  if (NumBytes || MFI.hasVarSizedObjects())
    NumBytes += mergeSPUpdates(MBB, MBBI, true);

  // If dynamic alloca is used, then reset esp to point to the last callee-saved
  // slot before popping them off!  The same applies when the stack was
  // realigned. Don't do this if this was a funclet epilogue, since the funclets
  // will not do realignment or dynamic stack allocation.
  if ((TRI->hasStackRealignment(MF) || MFI.hasVarSizedObjects()) &&
      !IsFunclet) {
    if (TRI->hasStackRealignment(MF))
      MBBI = FirstCSPop;
    unsigned SEHFrameOffset = calculateSetFPREG(SEHStackAllocAmt);
    uint64_t LEAAmount =
        IsWin64Prologue ? SEHStackAllocAmt - SEHFrameOffset : -CSSize;

    if (X86FI->hasSwiftAsyncContext())
      LEAAmount -= 16;

    // There are only two legal forms of epilogue:
    // - add SEHAllocationSize, %rsp
    // - lea SEHAllocationSize(%FramePtr), %rsp
    //
    // 'mov %FramePtr, %rsp' will not be recognized as an epilogue sequence.
    // However, we may use this sequence if we have a frame pointer because the
    // effects of the prologue can safely be undone.
    if (LEAAmount != 0) {
      unsigned Opc = getLEArOpcode(Uses64BitFramePtr);
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr), FramePtr,
                   false, LEAAmount);
      --MBBI;
    } else {
      unsigned Opc = (Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr);
      BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
          .addReg(FramePtr);
      --MBBI;
    }
  } else if (NumBytes) {
    // Adjust stack pointer back: ESP += numbytes.
    emitSPUpdate(MBB, MBBI, DL, NumBytes, /*InEpilogue=*/true);
    if (!HasFP && NeedsDwarfCFI) {
      // Define the current CFA rule to use the provided offset.
      BuildCFI(MBB, MBBI, DL,
               MCCFIInstruction::cfiDefCfaOffset(
                   nullptr, CSSize + TailCallArgReserveSize + SlotSize));
    }
    --MBBI;
  }

  // The Windows unwinder will not invoke a function's exception handler if the
  // IP is either in the prologue or in the epilogue. This behavior causes a
  // problem when a call immediately precedes an epilogue, because the return
  // address points into the epilogue.  To cope with that, we insert an
  // epilogue marker here, then replace it with a 'nop' if it ends up
  // immediately after a CALL in the final emitted code.
  if (NeedsWin64CFI && MF.hasWinCFI())
    BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_Epilogue));

  if (!HasFP && NeedsDwarfCFI) {
    MBBI = FirstCSPop;
    int64_t Offset = -CSSize - SlotSize;
    // Mark callee-saved pop instruction.
    // Define the current CFA rule to use the provided offset.
    while (MBBI != MBB.end()) {
      MachineBasicBlock::iterator PI = MBBI;
      unsigned Opc = PI->getOpcode();
      ++MBBI;
      if (Opc == X86::POP32r || Opc == X86::POP64r) {
        Offset += SlotSize;
        BuildCFI(MBB, MBBI, DL,
                 MCCFIInstruction::cfiDefCfaOffset(nullptr, -Offset));
      }
    }
  }

  // Emit DWARF info specifying the restores of the callee-saved registers.
  // For an epilogue that ends in a return, or for any other block without
  // successors, there is no need to generate .cfi_restore for the
  // callee-saved registers.
  if (NeedsDwarfCFI && !MBB.succ_empty())
    emitCalleeSavedFrameMoves(MBB, AfterPop, DL, false);

  if (Terminator == MBB.end() || !isTailCallOpcode(Terminator->getOpcode())) {
    // Add the return addr area delta back since we are not tail calling.
    int Offset = -1 * X86FI->getTCReturnAddrDelta();
    assert(Offset >= 0 && "TCDelta should never be positive");
    if (Offset) {
      // Check for possible merge with preceding ADD instruction.
      Offset += mergeSPUpdates(MBB, Terminator, true);
      emitSPUpdate(MBB, Terminator, DL, Offset, /*InEpilogue=*/true);
    }
  }

  // Emit tilerelease for AMX kernel.
  if (X86FI->hasVirtualTileReg())
    BuildMI(MBB, Terminator, DL, TII.get(X86::TILERELEASE));
}

StackOffset X86FrameLowering::getFrameIndexReference(const MachineFunction &MF,
                                                     int FI,
                                                     Register &FrameReg) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  bool IsFixed = MFI.isFixedObjectIndex(FI);
  // We can't calculate the offset from the frame pointer if the stack is
  // realigned, so enforce usage of the stack/base pointer.  The base pointer
  // is used when we have dynamic allocas in addition to dynamic realignment.
  if (TRI->hasBasePointer(MF))
    FrameReg = IsFixed ? TRI->getFramePtr() : TRI->getBaseRegister();
  else if (TRI->hasStackRealignment(MF))
    FrameReg = IsFixed ? TRI->getFramePtr() : TRI->getStackRegister();
  else
    FrameReg = TRI->getFrameRegister(MF);

  // Offset will hold the offset from the stack pointer at function entry to the
  // object.
  // We need to factor in additional offsets applied during the prologue to the
  // frame, base, and stack pointer depending on which is used.
  int Offset = MFI.getObjectOffset(FI) - getOffsetOfLocalArea();
  const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  unsigned CSSize = X86FI->getCalleeSavedFrameSize();
  uint64_t StackSize = MFI.getStackSize();
  bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
  int64_t FPDelta = 0;

  // In an x86 interrupt, remove the offset we added to account for the return
  // address from any stack object allocated in the caller's frame. Interrupts
  // do not have a standard return address. Fixed objects in the current frame,
  // such as SSE register spills, should not get this treatment.
  if (MF.getFunction().getCallingConv() == CallingConv::X86_INTR &&
      Offset >= 0) {
    Offset += getOffsetOfLocalArea();
  }

  if (IsWin64Prologue) {
    assert(!MFI.hasCalls() || (StackSize % 16) == 8);

    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    // If required, include space for the extra hidden slot for stashing the
    // base pointer.
    if (X86FI->getRestoreBasePointer())
      FrameSize += SlotSize;
    uint64_t NumBytes = FrameSize - CSSize;

    uint64_t SEHFrameOffset = calculateSetFPREG(NumBytes);
    if (FI && FI == X86FI->getFAIndex())
      return StackOffset::getFixed(-SEHFrameOffset);

    // FPDelta is the offset from the "traditional" FP location of the old base
    // pointer followed by return address and the location required by the
    // restricted Win64 prologue.
    // Add FPDelta to all offsets below that go through the frame pointer.
    FPDelta = FrameSize - SEHFrameOffset;
    assert((!MFI.hasCalls() || (FPDelta % 16) == 0) &&
           "FPDelta isn't aligned per the Win64 ABI!");
  }

  if (FrameReg == TRI->getFramePtr()) {
    // Skip saved EBP/RBP.
    Offset += SlotSize;

    // Account for restricted Windows prologue.
    Offset += FPDelta;

    // Skip the RETADDR move area.
    int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
    if (TailCallReturnAddrDelta < 0)
      Offset -= TailCallReturnAddrDelta;

    return StackOffset::getFixed(Offset);
  }

  // FrameReg is either the stack pointer or a base pointer. But the base is
  // located at the end of the statically known StackSize, so the distinction
  // doesn't really matter.
  if (TRI->hasStackRealignment(MF) || TRI->hasBasePointer(MF))
    assert(isAligned(MFI.getObjectAlign(FI), -(Offset + StackSize)));
  return StackOffset::getFixed(Offset + StackSize);
}

int X86FrameLowering::getWin64EHFrameIndexRef(const MachineFunction &MF, int FI,
                                              Register &FrameReg) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  const auto &WinEHXMMSlotInfo = X86FI->getWinEHXMMSlotInfo();
  const auto it = WinEHXMMSlotInfo.find(FI);

  if (it == WinEHXMMSlotInfo.end())
    return getFrameIndexReference(MF, FI, FrameReg).getFixed();

  FrameReg = TRI->getStackRegister();
  return alignDown(MFI.getMaxCallFrameSize(), getStackAlign().value()) +
         it->second;
}

StackOffset
X86FrameLowering::getFrameIndexReferenceSP(const MachineFunction &MF, int FI,
                                           Register &FrameReg,
                                           int Adjustment) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  FrameReg = TRI->getStackRegister();
  return StackOffset::getFixed(MFI.getObjectOffset(FI) -
                               getOffsetOfLocalArea() + Adjustment);
}

StackOffset
X86FrameLowering::getFrameIndexReferencePreferSP(const MachineFunction &MF,
                                                 int FI, Register &FrameReg,
                                                 bool IgnoreSPUpdates) const {

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  // Does not include any dynamic realign.
  const uint64_t StackSize = MFI.getStackSize();
  // LLVM arranges the stack as follows:
  //   ...
  //   ARG2
  //   ARG1
  //   RETADDR
  //   PUSH RBP   <-- RBP points here
  //   PUSH CSRs
  //   ~~~~~~~    <-- possible stack realignment (non-win64)
  //   ...
  //   STACK OBJECTS
  //   ...        <-- RSP after prologue points here
  //   ~~~~~~~    <-- possible stack realignment (win64)
  //
  // if (hasVarSizedObjects()):
  //   ...        <-- "base pointer" (ESI/RBX) points here
  //   DYNAMIC ALLOCAS
  //   ...        <-- RSP points here
  //
  // Case 1: In the simple case of no stack realignment and no dynamic
  // allocas, both "fixed" stack objects (arguments and CSRs) are addressable
  // with fixed offsets from RSP.
  //
  // Case 2: In the case of stack realignment with no dynamic allocas, fixed
  // stack objects are addressed with RBP and regular stack objects with RSP.
  //
  // Case 3: In the case of dynamic allocas and stack realignment, RSP is used
  // to address stack arguments for outgoing calls and nothing else. The "base
  // pointer" points to local variables, and RBP points to fixed objects.
  //
  // In cases 2 and 3, we can only answer for non-fixed stack objects, and the
  // answer we give is relative to the SP after the prologue, and not the
  // SP in the middle of the function.

  if (MFI.isFixedObjectIndex(FI) && TRI->hasStackRealignment(MF) &&
      !STI.isTargetWin64())
    return getFrameIndexReference(MF, FI, FrameReg);

  // If !hasReservedCallFrame the function might have SP adjustment in the
  // body. So, even though the offset is statically known, it depends on where
  // we are in the function.
  if (!IgnoreSPUpdates && !hasReservedCallFrame(MF))
    return getFrameIndexReference(MF, FI, FrameReg);

  // We don't handle tail calls, and shouldn't be seeing them either.
  assert(MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta() >= 0 &&
         "we don't handle this case!");

  // This is how the math works out:
  //
  //  %rsp grows (i.e. gets lower) left to right. Each box below is
  //  one word (eight bytes).  Obj0 is the stack slot we're trying to
  //  get to.
  //
  //    ----------------------------------
  //    | BP | Obj0 | Obj1 | ... | ObjN |
  //    ----------------------------------
  //    ^    ^      ^                   ^
  //    A    B      C                   E
  //
  // A is the incoming stack pointer.
  // (B - A) is the local area offset (-8 for x86-64) [1]
  // (C - A) is the Offset returned by MFI.getObjectOffset for Obj0 [2]
  //
  // |(E - B)| is the StackSize (absolute value, positive).  For a
  // stack that grows down, this works out to be (B - E). [3]
  //
  // E is also the value of %rsp after the stack has been set up, and we
  // want (C - E) -- the value we can add to %rsp to get to Obj0.  Now
  // (C - E) == (C - A) - (B - A) + (B - E)
  //            { Using [1], [2] and [3] above }
  //         == getObjectOffset - LocalAreaOffset + StackSize

  return getFrameIndexReferenceSP(MF, FI, FrameReg, StackSize);
}

bool X86FrameLowering::assignCalleeSavedSpillSlots(
    MachineFunction &MF, const TargetRegisterInfo *TRI,
    std::vector<CalleeSavedInfo> &CSI) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();

  unsigned CalleeSavedFrameSize = 0;
  unsigned XMMCalleeSavedFrameSize = 0;
  auto &WinEHXMMSlotInfo = X86FI->getWinEHXMMSlotInfo();
  int SpillSlotOffset = getOffsetOfLocalArea() + X86FI->getTCReturnAddrDelta();

  int64_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();

  if (TailCallReturnAddrDelta < 0) {
    // create RETURNADDR area
    //   arg
    //   arg
    //   RETADDR
    //   { ...
    //     RETADDR area
    //     ...
    //   }
    //   [EBP]
    MFI.CreateFixedObject(-TailCallReturnAddrDelta,
                          TailCallReturnAddrDelta - SlotSize, true);
  }

  // Spill the BasePtr if it's used.
  if (this->TRI->hasBasePointer(MF)) {
    // Allocate a spill slot for EBP if we have a base pointer and EH funclets.
    if (MF.hasEHFunclets()) {
      int FI = MFI.CreateSpillStackObject(SlotSize, Align(SlotSize));
      X86FI->setHasSEHFramePtrSave(true);
      X86FI->setSEHFramePtrSaveIndex(FI);
    }
  }

  if (hasFP(MF)) {
    // emitPrologue always spills the frame register first.
    SpillSlotOffset -= SlotSize;
    MFI.CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);

    // The async context lives directly before the frame pointer, and we
    // allocate a second slot to preserve stack alignment.
    if (X86FI->hasSwiftAsyncContext()) {
      SpillSlotOffset -= SlotSize;
      MFI.CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
      SpillSlotOffset -= SlotSize;
    }

    // Since emitPrologue and emitEpilogue will handle spilling and restoring of
    // the frame register, we can delete it from the CSI list and not have to
    // worry about avoiding it later.
    Register FPReg = TRI->getFrameRegister(MF);
    for (unsigned i = 0; i < CSI.size(); ++i) {
      if (TRI->regsOverlap(CSI[i].getReg(), FPReg)) {
        CSI.erase(CSI.begin() + i);
        break;
      }
    }
  }

  // Assign slots for GPRs. It increases frame size.
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i - 1].getReg();

    if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
      continue;

    SpillSlotOffset -= SlotSize;
    CalleeSavedFrameSize += SlotSize;

    int SlotIndex = MFI.CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
    CSI[i - 1].setFrameIdx(SlotIndex);
  }

  X86FI->setCalleeSavedFrameSize(CalleeSavedFrameSize);
  MFI.setCVBytesOfCalleeSavedRegisters(CalleeSavedFrameSize);

  // Assign slots for XMMs.
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i - 1].getReg();
    if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
      continue;

    // If this is a k-register, make sure we look it up via the largest legal
    // type.
    MVT VT = MVT::Other;
    if (X86::VK16RegClass.contains(Reg))
      VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;
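    // For a plain XMM CSR this yields a 16-byte slot with 16-byte alignment;
    // SpillSlotOffset is rounded down to that alignment just below.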

    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
    unsigned Size = TRI->getSpillSize(*RC);
    Align Alignment = TRI->getSpillAlign(*RC);
    // Ensure alignment.
    assert(SpillSlotOffset < 0 && "SpillSlotOffset should always be < 0 on X86");
    SpillSlotOffset = -alignTo(-SpillSlotOffset, Alignment);

    // Spill into slot.
    SpillSlotOffset -= Size;
    int SlotIndex = MFI.CreateFixedSpillStackObject(Size, SpillSlotOffset);
    CSI[i - 1].setFrameIdx(SlotIndex);
    MFI.ensureMaxAlignment(Alignment);

    // Save the start offset and size of XMM in the stack frame for funclets.
    if (X86::VR128RegClass.contains(Reg)) {
      WinEHXMMSlotInfo[SlotIndex] = XMMCalleeSavedFrameSize;
      XMMCalleeSavedFrameSize += Size;
    }
  }

  return true;
}

bool X86FrameLowering::spillCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBB.findDebugLoc(MI);

  // Don't save CSRs in 32-bit EH funclets. The caller saves EBX, EBP, ESI, EDI
  // for us, and there are no XMM CSRs on Win32.
  if (MBB.isEHFuncletEntry() && STI.is32Bit() && STI.isOSWindows())
    return true;

  // Push GPRs. It increases frame size.
  const MachineFunction &MF = *MBB.getParent();
  unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i - 1].getReg();

    if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
      continue;

    const MachineRegisterInfo &MRI = MF.getRegInfo();
    bool isLiveIn = MRI.isLiveIn(Reg);
    if (!isLiveIn)
      MBB.addLiveIn(Reg);

    // Decide whether we can add a kill flag to the use.
    bool CanKill = !isLiveIn;
    // Check if any subregister is live-in.
    if (CanKill) {
      for (MCRegAliasIterator AReg(Reg, TRI, false); AReg.isValid(); ++AReg) {
        if (MRI.isLiveIn(*AReg)) {
          CanKill = false;
          break;
        }
      }
    }

    // Do not set a kill flag on values that are also marked as live-in. This
    // happens with the @llvm.returnaddress intrinsic and with arguments
    // passed in callee saved registers.
    // Omitting the kill flags is conservatively correct even if the live-in
    // is not used after all.
    BuildMI(MBB, MI, DL, TII.get(Opc))
        .addReg(Reg, getKillRegState(CanKill))
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // Spill the XMM registers. X86 has no push/pop instructions for XMM
  // registers, so they are stored to the stack frame instead.
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i - 1].getReg();
    if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
      continue;

    // If this is a k-register, make sure we look it up via the largest legal
    // type.
    MVT VT = MVT::Other;
    if (X86::VK16RegClass.contains(Reg))
      VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;

    // Add the callee-saved register as live-in. It's killed at the spill.
    MBB.addLiveIn(Reg);
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);

    TII.storeRegToStackSlot(MBB, MI, Reg, true, CSI[i - 1].getFrameIdx(), RC,
                            TRI);
    --MI;
    MI->setFlag(MachineInstr::FrameSetup);
    ++MI;
  }

  return true;
}

void X86FrameLowering::emitCatchRetReturnValue(MachineBasicBlock &MBB,
                                               MachineBasicBlock::iterator MBBI,
                                               MachineInstr *CatchRet) const {
  // SEH shouldn't use catchret.
  assert(!isAsynchronousEHPersonality(classifyEHPersonality(
             MBB.getParent()->getFunction().getPersonalityFn())) &&
         "SEH should not use CATCHRET");
  const DebugLoc &DL = CatchRet->getDebugLoc();
  MachineBasicBlock *CatchRetTarget = CatchRet->getOperand(0).getMBB();

  // Fill EAX/RAX with the address of the target block.
  if (STI.is64Bit()) {
    // LEA64r CatchRetTarget(%rip), %rax
    BuildMI(MBB, MBBI, DL, TII.get(X86::LEA64r), X86::RAX)
        .addReg(X86::RIP)
        .addImm(0)
        .addReg(0)
        .addMBB(CatchRetTarget)
        .addReg(0);
  } else {
    // MOV32ri $CatchRetTarget, %eax
    BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
        .addMBB(CatchRetTarget);
  }

  // Record that we've taken the address of CatchRetTarget and no longer just
  // reference it in a terminator.
  CatchRetTarget->setHasAddressTaken();
}

bool X86FrameLowering::restoreCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    MutableArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return false;

  if (MI != MBB.end() && isFuncletReturnInstr(*MI) && STI.isOSWindows()) {
    // Don't restore CSRs in 32-bit EH funclets. Matches
    // spillCalleeSavedRegisters.
    if (STI.is32Bit())
      return true;
    // Don't restore CSRs before an SEH catchret. SEH except blocks do not form
    // funclets. emitEpilogue transforms these to normal jumps.
    if (MI->getOpcode() == X86::CATCHRET) {
      const Function &F = MBB.getParent()->getFunction();
      bool IsSEH = isAsynchronousEHPersonality(
          classifyEHPersonality(F.getPersonalityFn()));
      if (IsSEH)
        return true;
    }
  }

  DebugLoc DL = MBB.findDebugLoc(MI);

  // Reload XMMs from the stack frame.
  for (const CalleeSavedInfo &I : CSI) {
    unsigned Reg = I.getReg();
    if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
      continue;

    // If this is a k-register, make sure we look it up via the largest legal
    // type.
    MVT VT = MVT::Other;
    if (X86::VK16RegClass.contains(Reg))
      VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;

    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
    TII.loadRegFromStackSlot(MBB, MI, Reg, I.getFrameIdx(), RC, TRI);
  }

  // POP GPRs.
  unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r;
  for (const CalleeSavedInfo &I : CSI) {
    unsigned Reg = I.getReg();
    if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
      continue;

    BuildMI(MBB, MI, DL, TII.get(Opc), Reg)
        .setMIFlag(MachineInstr::FrameDestroy);
  }
  return true;
}

void X86FrameLowering::determineCalleeSaves(MachineFunction &MF,
                                            BitVector &SavedRegs,
                                            RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);

  // Spill the BasePtr if it's used.
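  // (Under the x32 ABI the base pointer is tracked in its 32-bit form; the
  // 64-bit super-register is what actually gets saved, hence the widening
  // below.)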
  if (TRI->hasBasePointer(MF)) {
    Register BasePtr = TRI->getBaseRegister();
    if (STI.isTarget64BitILP32())
      BasePtr = getX86SubSuperRegister(BasePtr, 64);
    SavedRegs.set(BasePtr);
  }
}

static bool HasNestArgument(const MachineFunction *MF) {
  const Function &F = MF->getFunction();
  for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E;
       I++) {
    if (I->hasNestAttr() && !I->use_empty())
      return true;
  }
  return false;
}

/// GetScratchRegister - Get a temp register for performing work in the
/// segmented stack and the Erlang/HiPE stack prologue. Depending on platform
/// and the properties of the function either one or two registers will be
/// needed. Set primary to true for the first register, false for the second.
static unsigned GetScratchRegister(bool Is64Bit, bool IsLP64,
                                   const MachineFunction &MF, bool Primary) {
  CallingConv::ID CallingConvention = MF.getFunction().getCallingConv();

  // Erlang stuff.
  if (CallingConvention == CallingConv::HiPE) {
    if (Is64Bit)
      return Primary ? X86::R14 : X86::R13;
    else
      return Primary ? X86::EBX : X86::EDI;
  }

  if (Is64Bit) {
    if (IsLP64)
      return Primary ? X86::R11 : X86::R12;
    else
      return Primary ? X86::R11D : X86::R12D;
  }

  bool IsNested = HasNestArgument(&MF);

  if (CallingConvention == CallingConv::X86_FastCall ||
      CallingConvention == CallingConv::Fast ||
      CallingConvention == CallingConv::Tail) {
    if (IsNested)
      report_fatal_error("Segmented stacks do not support fastcall with "
                         "nested functions.");
    return Primary ? X86::EAX : X86::ECX;
  }
  if (IsNested)
    return Primary ? X86::EDX : X86::EAX;
  return Primary ? X86::ECX : X86::EAX;
}

// The stack limit in the TCB is set to this many bytes above the actual stack
// limit.
static const uint64_t kSplitStackAvailable = 256;

void X86FrameLowering::adjustForSegmentedStacks(
    MachineFunction &MF, MachineBasicBlock &PrologueMBB) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  uint64_t StackSize;
  unsigned TlsReg, TlsOffset;
  DebugLoc DL;

  // To support shrink-wrapping we would need to insert the new blocks
  // at the right place and update the branches to PrologueMBB.
  assert(&(*MF.begin()) == &PrologueMBB && "Shrink-wrapping not supported yet");

  unsigned ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
  assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
         "Scratch register is live-in");

  if (MF.getFunction().isVarArg())
    report_fatal_error("Segmented stacks do not support vararg functions.");
  if (!STI.isTargetLinux() && !STI.isTargetDarwin() && !STI.isTargetWin32() &&
      !STI.isTargetWin64() && !STI.isTargetFreeBSD() &&
      !STI.isTargetDragonFly())
    report_fatal_error("Segmented stacks not supported on this platform.");

  // Eventually StackSize will be calculated by a link-time pass, which will
  // also decide whether checking code needs to be injected into this
  // particular prologue.
  StackSize = MFI.getStackSize();

  // Do not generate a prologue for leaf functions with a stack of size zero.
  // For non-leaf functions we have to allow for the possibility that the
  // call is to a non-split function, as in PR37807. This function could also
  // take the address of a non-split function. When the linker tries to adjust
  // its non-existent prologue, it would fail with an error. Mark the object
  // file so that such failures are not errors. See this Go language bug-report:
  // https://go-review.googlesource.com/c/go/+/148819/
  if (StackSize == 0 && !MFI.hasTailCall()) {
    MF.getMMI().setHasNosplitStack(true);
    return;
  }

  MachineBasicBlock *allocMBB = MF.CreateMachineBasicBlock();
  MachineBasicBlock *checkMBB = MF.CreateMachineBasicBlock();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  bool IsNested = false;

  // We need to know if the function has a nest argument only in 64 bit mode.
  if (Is64Bit)
    IsNested = HasNestArgument(&MF);

  // The MOV R10, RAX needs to be in a different block, since the RET we emit
  // in allocMBB needs to be the last (terminating) instruction.

  for (const auto &LI : PrologueMBB.liveins()) {
    allocMBB->addLiveIn(LI);
    checkMBB->addLiveIn(LI);
  }

  if (IsNested)
    allocMBB->addLiveIn(IsLP64 ? X86::R10 : X86::R10D);

  MF.push_front(allocMBB);
  MF.push_front(checkMBB);

  // When the frame size is less than 256 we just compare the stack
  // boundary directly to the value of the stack pointer, per gcc.
  bool CompareStackPointer = StackSize < kSplitStackAvailable;

  // Read the limit of the current stacklet from the stack_guard location.
  if (Is64Bit) {
    if (STI.isTargetLinux()) {
      TlsReg = X86::FS;
      TlsOffset = IsLP64 ? 0x70 : 0x40;
    } else if (STI.isTargetDarwin()) {
      TlsReg = X86::GS;
      TlsOffset = 0x60 + 90 * 8; // See pthread_machdep.h. Steal TLS slot 90.
    } else if (STI.isTargetWin64()) {
      TlsReg = X86::GS;
      TlsOffset = 0x28; // pvArbitrary, reserved for application use
    } else if (STI.isTargetFreeBSD()) {
      TlsReg = X86::FS;
      TlsOffset = 0x18;
    } else if (STI.isTargetDragonFly()) {
      TlsReg = X86::FS;
      TlsOffset = 0x20; // use tls_tcb.tcb_segstack
    } else {
      report_fatal_error("Segmented stacks not supported on this platform.");
    }

    if (CompareStackPointer)
      ScratchReg = IsLP64 ? X86::RSP : X86::ESP;
    else
      BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::LEA64r : X86::LEA64_32r),
              ScratchReg)
          .addReg(X86::RSP)
          .addImm(1)
          .addReg(0)
          .addImm(-StackSize)
          .addReg(0);

    BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::CMP64rm : X86::CMP32rm))
        .addReg(ScratchReg)
        .addReg(0)
        .addImm(1)
        .addReg(0)
        .addImm(TlsOffset)
        .addReg(TlsReg);
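    // The compare just built checks the (possibly offset) stack pointer
    // against the stacklet limit held in the TLS block; e.g. on Linux x86-64
    // (TlsOffset == 0x70) this is roughly:
    //
    //   leaq -StackSize(%rsp), %r11
    //   cmpq %fs:0x70, %r11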
  } else {
    if (STI.isTargetLinux()) {
      TlsReg = X86::GS;
      TlsOffset = 0x30;
    } else if (STI.isTargetDarwin()) {
      TlsReg = X86::GS;
      TlsOffset = 0x48 + 90 * 4;
    } else if (STI.isTargetWin32()) {
      TlsReg = X86::FS;
      TlsOffset = 0x14; // pvArbitrary, reserved for application use
    } else if (STI.isTargetDragonFly()) {
      TlsReg = X86::FS;
      TlsOffset = 0x10; // use tls_tcb.tcb_segstack
    } else if (STI.isTargetFreeBSD()) {
      report_fatal_error("Segmented stacks not supported on FreeBSD i386.");
    } else {
      report_fatal_error("Segmented stacks not supported on this platform.");
    }

    if (CompareStackPointer)
      ScratchReg = X86::ESP;
    else
      BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg)
          .addReg(X86::ESP)
          .addImm(1)
          .addReg(0)
          .addImm(-StackSize)
          .addReg(0);

    if (STI.isTargetLinux() || STI.isTargetWin32() || STI.isTargetWin64() ||
        STI.isTargetDragonFly()) {
      BuildMI(checkMBB, DL, TII.get(X86::CMP32rm))
          .addReg(ScratchReg)
          .addReg(0)
          .addImm(0)
          .addReg(0)
          .addImm(TlsOffset)
          .addReg(TlsReg);
    } else if (STI.isTargetDarwin()) {
      // TlsOffset doesn't fit into a mod r/m byte so we need an extra register.
      unsigned ScratchReg2;
      bool SaveScratch2;
      if (CompareStackPointer) {
        // The primary scratch register is available for holding the TLS offset.
        ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, true);
        SaveScratch2 = false;
      } else {
        // Need to use a second register to hold the TLS offset.
        ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, false);

        // Unfortunately, with fastcc the second scratch register may hold an
        // argument.
        SaveScratch2 = MF.getRegInfo().isLiveIn(ScratchReg2);
      }

      // If Scratch2 is live-in then it needs to be saved.
      assert((!MF.getRegInfo().isLiveIn(ScratchReg2) || SaveScratch2) &&
             "Scratch register is live-in and not saved");

      if (SaveScratch2)
        BuildMI(checkMBB, DL, TII.get(X86::PUSH32r))
            .addReg(ScratchReg2, RegState::Kill);

      BuildMI(checkMBB, DL, TII.get(X86::MOV32ri), ScratchReg2)
          .addImm(TlsOffset);
      BuildMI(checkMBB, DL, TII.get(X86::CMP32rm))
          .addReg(ScratchReg)
          .addReg(ScratchReg2)
          .addImm(1)
          .addReg(0)
          .addImm(0)
          .addReg(TlsReg);

      if (SaveScratch2)
        BuildMI(checkMBB, DL, TII.get(X86::POP32r), ScratchReg2);
    }
  }

  // This jump is taken if SP >= (Stacklet Limit + Stack Space required).
  // It jumps to normal execution of the function body.
  BuildMI(checkMBB, DL, TII.get(X86::JCC_1))
      .addMBB(&PrologueMBB)
      .addImm(X86::COND_A);

  // On 32 bit we first push the argument size and then the frame size. On 64
  // bit, we pass the stack frame size in r10 and the argument size in r11.
  if (Is64Bit) {
    // Functions with nested arguments use R10, so it needs to be saved across
    // the call to _morestack.

    const unsigned RegAX = IsLP64 ? X86::RAX : X86::EAX;
    const unsigned Reg10 = IsLP64 ? X86::R10 : X86::R10D;
    const unsigned Reg11 = IsLP64 ? X86::R11 : X86::R11D;
    const unsigned MOVrr = IsLP64 ? X86::MOV64rr : X86::MOV32rr;
    const unsigned MOVri = IsLP64 ? X86::MOV64ri : X86::MOV32ri;
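    // The moves built below materialize the two sizes __morestack consumes;
    // e.g. for a 4072-byte frame with 16 bytes of stack arguments
    // (hypothetical numbers):
    //
    //   movabsq $4072, %r10     # frame size
    //   movabsq $16,   %r11     # argument size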

    if (IsNested)
      BuildMI(allocMBB, DL, TII.get(MOVrr), RegAX).addReg(Reg10);

    BuildMI(allocMBB, DL, TII.get(MOVri), Reg10)
        .addImm(StackSize);
    BuildMI(allocMBB, DL, TII.get(MOVri), Reg11)
        .addImm(X86FI->getArgumentStackSize());
  } else {
    BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
        .addImm(X86FI->getArgumentStackSize());
    BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
        .addImm(StackSize);
  }

  // __morestack is in libgcc
  if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {
    // Under the large code model, we cannot assume that __morestack lives
    // within 2^31 bytes of the call site, so we cannot use pc-relative
    // addressing. We cannot perform the call via a temporary register,
    // as the rax register may be used to store the static chain, and all
    // other suitable registers may be either callee-save or used for
    // parameter passing. We cannot use the stack at this point either
    // because __morestack manipulates the stack directly.
    //
    // To avoid these issues, perform an indirect call via a read-only memory
    // location containing the address.
    //
    // This solution is not perfect, as it assumes that the .rodata section
    // is laid out within 2^31 bytes of each function body, but this seems
    // to be sufficient for JIT.
    // FIXME: Add retpoline support and remove the error here.
    if (STI.useIndirectThunkCalls())
      report_fatal_error("Emitting morestack calls on 64-bit with the large "
                         "code model and thunks not yet implemented.");
    BuildMI(allocMBB, DL, TII.get(X86::CALL64m))
        .addReg(X86::RIP)
        .addImm(0)
        .addReg(0)
        .addExternalSymbol("__morestack_addr")
        .addReg(0);
    MF.getMMI().setUsesMorestackAddr(true);
  } else {
    if (Is64Bit)
      BuildMI(allocMBB, DL, TII.get(X86::CALL64pcrel32))
          .addExternalSymbol("__morestack");
    else
      BuildMI(allocMBB, DL, TII.get(X86::CALLpcrel32))
          .addExternalSymbol("__morestack");
  }

  if (IsNested)
    BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET_RESTORE_R10));
  else
    BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET));

  allocMBB->addSuccessor(&PrologueMBB);

  checkMBB->addSuccessor(allocMBB, BranchProbability::getZero());
  checkMBB->addSuccessor(&PrologueMBB, BranchProbability::getOne());

#ifdef EXPENSIVE_CHECKS
  MF.verify();
#endif
}

/// Lookup an ERTS parameter in the !hipe.literals named metadata node.
/// HiPE provides Erlang Runtime System-internal parameters, such as PCB offsets
/// to fields it needs, through a named metadata node "hipe.literals" containing
/// name-value pairs.
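/// For example (illustrative values), the module might carry:
///
///   !hipe.literals = !{!0, !1, !2}
///   !0 = !{!"P_NSP_LIMIT", i32 96}
///   !1 = !{!"X86_LEAF_WORDS", i32 24}
///   !2 = !{!"AMD64_LEAF_WORDS", i32 24}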
static unsigned getHiPELiteral(NamedMDNode *HiPELiteralsMD,
                               const StringRef LiteralName) {
  for (int i = 0, e = HiPELiteralsMD->getNumOperands(); i != e; ++i) {
    MDNode *Node = HiPELiteralsMD->getOperand(i);
    if (Node->getNumOperands() != 2)
      continue;
    MDString *NodeName = dyn_cast<MDString>(Node->getOperand(0));
    ValueAsMetadata *NodeVal = dyn_cast<ValueAsMetadata>(Node->getOperand(1));
    if (!NodeName || !NodeVal)
      continue;
    ConstantInt *ValConst = dyn_cast_or_null<ConstantInt>(NodeVal->getValue());
    if (ValConst && NodeName->getString() == LiteralName) {
      return ValConst->getZExtValue();
    }
  }

  report_fatal_error("HiPE literal " + LiteralName +
                     " required but not provided");
}

// Return true if there are no non-ehpad successors to MBB and there are no
// non-meta instructions between MBBI and MBB.end().
static bool blockEndIsUnreachable(const MachineBasicBlock &MBB,
                                  MachineBasicBlock::const_iterator MBBI) {
  return llvm::all_of(
             MBB.successors(),
             [](const MachineBasicBlock *Succ) { return Succ->isEHPad(); }) &&
         std::all_of(MBBI, MBB.end(), [](const MachineInstr &MI) {
           return MI.isMetaInstruction();
         });
}

/// Erlang programs may need a special prologue to handle the stack size they
/// might need at runtime. That is because Erlang/OTP does not implement a C
/// stack but uses a custom implementation of a hybrid stack/heap architecture.
/// (For more information see Eric Stenman's Ph.D. thesis:
/// http://publications.uu.se/uu/fulltext/nbn_se_uu_diva-2688.pdf)
///
/// CheckStack:
///       temp0 = sp - MaxStack
///       if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
/// OldStart:
///       ...
/// IncStack:
///       call inc_stack   # doubles the stack space
///       temp0 = sp - MaxStack
///       if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
void X86FrameLowering::adjustForHiPEPrologue(
    MachineFunction &MF, MachineBasicBlock &PrologueMBB) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  DebugLoc DL;

  // To support shrink-wrapping we would need to insert the new blocks
  // at the right place and update the branches to PrologueMBB.
  assert(&(*MF.begin()) == &PrologueMBB && "Shrink-wrapping not supported yet");

  // HiPE-specific values.
  NamedMDNode *HiPELiteralsMD =
      MF.getMMI().getModule()->getNamedMetadata("hipe.literals");
  if (!HiPELiteralsMD)
    report_fatal_error(
        "Can't generate HiPE prologue without runtime parameters");
  const unsigned HipeLeafWords = getHiPELiteral(
      HiPELiteralsMD, Is64Bit ? "AMD64_LEAF_WORDS" : "X86_LEAF_WORDS");
  const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5;
  const unsigned Guaranteed = HipeLeafWords * SlotSize;
  unsigned CallerStkArity = MF.getFunction().arg_size() > CCRegisteredArgs
                                ? MF.getFunction().arg_size() - CCRegisteredArgs
                                : 0;
  unsigned MaxStack = MFI.getStackSize() + CallerStkArity * SlotSize + SlotSize;

  assert(STI.isTargetLinux() &&
         "HiPE prologue is only supported on Linux operating systems.");

  // Compute the largest caller's frame that is needed to fit the callees'
  // frames. This 'MaxStack' is computed from:
  if (MFI.hasCalls()) {
    unsigned MoreStackForCalls = 0;

    for (auto &MBB : MF) {
      for (auto &MI : MBB) {
        if (!MI.isCall())
          continue;

        // Get the callee operand.
        const MachineOperand &MO = MI.getOperand(0);

        // Only take account of global function calls (no closures etc.).
        if (!MO.isGlobal())
          continue;

        const Function *F = dyn_cast<Function>(MO.getGlobal());
        if (!F)
          continue;

        // Do not update 'MaxStack' for primitive and built-in functions,
        // i.e. those whose names contain "erlang." or "bif_", or that
        // contain neither a "." (as a regular <Module>.<Function>.<Arity>
        // name would) nor an "_" (as a BIF name like "suspend_0" would),
        // since they are executed on another stack.
        if (F->getName().contains("erlang.") ||
            F->getName().contains("bif_") ||
            F->getName().find_first_of("._") == StringRef::npos)
          continue;

        unsigned CalleeStkArity = F->arg_size() > CCRegisteredArgs
                                      ? F->arg_size() - CCRegisteredArgs
                                      : 0;
        if (HipeLeafWords - 1 > CalleeStkArity)
          MoreStackForCalls =
              std::max(MoreStackForCalls,
                       (HipeLeafWords - 1 - CalleeStkArity) * SlotSize);
      }
    }
    MaxStack += MoreStackForCalls;
  }

  // If the stack frame needed is larger than the guaranteed space, runtime
  // checks and calls to the "inc_stack_0" BIF are inserted in the assembly
  // prologue.
  if (MaxStack > Guaranteed) {
    MachineBasicBlock *stackCheckMBB = MF.CreateMachineBasicBlock();
    MachineBasicBlock *incStackMBB = MF.CreateMachineBasicBlock();

    for (const auto &LI : PrologueMBB.liveins()) {
      stackCheckMBB->addLiveIn(LI);
      incStackMBB->addLiveIn(LI);
    }

    MF.push_front(incStackMBB);
    MF.push_front(stackCheckMBB);

    unsigned ScratchReg, SPReg, PReg, SPLimitOffset;
    unsigned LEAop, CMPop, CALLop;
    SPLimitOffset = getHiPELiteral(HiPELiteralsMD, "P_NSP_LIMIT");
    if (Is64Bit) {
      SPReg = X86::RSP;
      PReg = X86::RBP;
      LEAop = X86::LEA64r;
      CMPop = X86::CMP64rm;
      CALLop = X86::CALL64pcrel32;
    } else {
      SPReg = X86::ESP;
      PReg = X86::EBP;
      LEAop = X86::LEA32r;
      CMPop = X86::CMP32rm;
      CALLop = X86::CALLpcrel32;
    }

    ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
    assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
           "HiPE prologue scratch register is live-in");

    // Create a new MBB for StackCheck:
    addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(LEAop), ScratchReg),
                 SPReg, false, -MaxStack);
    // SPLimitOffset is in a fixed heap location (pointed to by BP).
    addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(CMPop)).addReg(ScratchReg),
                 PReg, false, SPLimitOffset);
    BuildMI(stackCheckMBB, DL, TII.get(X86::JCC_1))
        .addMBB(&PrologueMBB)
        .addImm(X86::COND_AE);
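    // On 64-bit targets, the check just built is roughly (register chosen
    // for illustration; GetScratchRegister picks the actual scratch):
    //   leaq -MaxStack(%rsp), %r11
    //   cmpq P_NSP_LIMIT(%rbp), %r11
    //   jae  OldStart          # enough stack; otherwise fall through below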
    // Create a new MBB for IncStack:
    BuildMI(incStackMBB, DL, TII.get(CALLop)).addExternalSymbol("inc_stack_0");
    addRegOffset(BuildMI(incStackMBB, DL, TII.get(LEAop), ScratchReg),
                 SPReg, false, -MaxStack);
    addRegOffset(BuildMI(incStackMBB, DL, TII.get(CMPop)).addReg(ScratchReg),
                 PReg, false, SPLimitOffset);
    BuildMI(incStackMBB, DL, TII.get(X86::JCC_1))
        .addMBB(incStackMBB)
        .addImm(X86::COND_LE);

    stackCheckMBB->addSuccessor(&PrologueMBB, {99, 100});
    stackCheckMBB->addSuccessor(incStackMBB, {1, 100});
    incStackMBB->addSuccessor(&PrologueMBB, {99, 100});
    incStackMBB->addSuccessor(incStackMBB, {1, 100});
  }
#ifdef EXPENSIVE_CHECKS
  MF.verify();
#endif
}

bool X86FrameLowering::adjustStackWithPops(MachineBasicBlock &MBB,
                                           MachineBasicBlock::iterator MBBI,
                                           const DebugLoc &DL,
                                           int Offset) const {
  if (Offset <= 0)
    return false;

  if (Offset % SlotSize)
    return false;

  int NumPops = Offset / SlotSize;
  // This is only worth it if we have at most 2 pops.
  if (NumPops != 1 && NumPops != 2)
    return false;

  // Handle only the trivial case where the adjustment directly follows
  // a call. This is the most common one, anyway.
  if (MBBI == MBB.begin())
    return false;
  MachineBasicBlock::iterator Prev = std::prev(MBBI);
  if (!Prev->isCall() || !Prev->getOperand(1).isRegMask())
    return false;

  unsigned Regs[2];
  unsigned FoundRegs = 0;

  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const MachineOperand &RegMask = Prev->getOperand(1);

  auto &RegClass =
      Is64Bit ? X86::GR64_NOREX_NOSPRegClass : X86::GR32_NOREX_NOSPRegClass;
  // Try to find up to NumPops free registers.
  for (auto Candidate : RegClass) {
    // Poor man's liveness:
    // Since we're immediately after a call, any register that is clobbered
    // by the call and not defined by it can be considered dead.
    if (!RegMask.clobbersPhysReg(Candidate))
      continue;

    // Don't clobber reserved registers.
    if (MRI.isReserved(Candidate))
      continue;

    bool IsDef = false;
    for (const MachineOperand &MO : Prev->implicit_operands()) {
      if (MO.isReg() && MO.isDef() &&
          TRI->isSuperOrSubRegisterEq(MO.getReg(), Candidate)) {
        IsDef = true;
        break;
      }
    }

    if (IsDef)
      continue;

    Regs[FoundRegs++] = Candidate;
    if (FoundRegs == (unsigned)NumPops)
      break;
  }

  if (FoundRegs == 0)
    return false;

  // If we found only one free register, but need two, reuse the same one
  // twice.
  while (FoundRegs < (unsigned)NumPops)
    Regs[FoundRegs++] = Regs[0];

  for (int i = 0; i < NumPops; ++i)
    BuildMI(MBB, MBBI, DL,
            TII.get(STI.is64Bit() ? X86::POP64r : X86::POP32r), Regs[i]);

  return true;
}
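// For illustration: in a 32-bit function compiled for minimum size, an 8-byte
// post-call SP adjustment can become
//   popl %ecx
//   popl %edx
// (one-byte pops into dead, call-clobbered registers) instead of the larger
// "addl $8, %esp". The registers shown are hypothetical; the actual choice
// depends on the call's register mask.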
MachineBasicBlock::iterator X86FrameLowering::eliminateCallFramePseudoInstr(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator I) const {
  bool reserveCallFrame = hasReservedCallFrame(MF);
  unsigned Opcode = I->getOpcode();
  bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();
  DebugLoc DL = I->getDebugLoc(); // copy DebugLoc as I will be erased.
  uint64_t Amount = TII.getFrameSize(*I);
  uint64_t InternalAmt = (isDestroy || Amount) ? TII.getFrameAdjustment(*I)
                                               : 0;
  I = MBB.erase(I);
  auto InsertPos = skipDebugInstructionsForward(I, MBB.end());

  // Try to avoid emitting dead SP adjustments if the block end is unreachable,
  // typically because the function is marked noreturn (abort, throw,
  // assert_fail, etc).
  if (isDestroy && blockEndIsUnreachable(MBB, I))
    return I;

  if (!reserveCallFrame) {
    // If the stack pointer can be changed after the prologue, turn the
    // adjcallstackdown instruction into a 'sub ESP, <amt>' and the
    // adjcallstackup instruction into an 'add ESP, <amt>'.

    // We need to keep the stack aligned properly. To do this, we round the
    // amount of space needed for the outgoing arguments up to the next
    // alignment boundary.
    Amount = alignTo(Amount, getStackAlign());

    const Function &F = MF.getFunction();
    bool WindowsCFI = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
    bool DwarfCFI = !WindowsCFI && MF.needsFrameMoves();

    // If we have any exception handlers in this function, and we adjust
    // the SP before calls, we may need to indicate this to the unwinder
    // using GNU_ARGS_SIZE. Note that this may be necessary even when
    // Amount == 0, because the preceding function may have set a non-zero
    // GNU_ARGS_SIZE.
    // TODO: We don't need to reset this between subsequent functions,
    // if it didn't change.
    bool HasDwarfEHHandlers = !WindowsCFI && !MF.getLandingPads().empty();

    if (HasDwarfEHHandlers && !isDestroy &&
        MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences())
      BuildCFI(MBB, InsertPos, DL,
               MCCFIInstruction::createGnuArgsSize(nullptr, Amount));

    if (Amount == 0)
      return I;

    // Factor out the amount that gets handled inside the sequence
    // (pushes of arguments for frame setup, callee pops for frame destroy).
    Amount -= InternalAmt;

    // TODO: This is needed only if we require precise CFA.
    // If this is a callee-pop calling convention, emit a CFA adjust for
    // the amount the callee popped.
    if (isDestroy && InternalAmt && DwarfCFI && !hasFP(MF))
      BuildCFI(MBB, InsertPos, DL,
               MCCFIInstruction::createAdjustCfaOffset(nullptr, -InternalAmt));

    // Add Amount to SP to destroy a frame, or subtract to set one up.
    int64_t StackAdjustment = isDestroy ? Amount : -Amount;

    if (StackAdjustment) {
      // Merge with any previous or following adjustment instruction. Note:
      // the instructions merged here do not have CFI, so their stack
      // adjustments do not feed into CfaAdjustment.
      StackAdjustment += mergeSPUpdates(MBB, InsertPos, true);
      StackAdjustment += mergeSPUpdates(MBB, InsertPos, false);

      if (StackAdjustment) {
        if (!(F.hasMinSize() &&
              adjustStackWithPops(MBB, InsertPos, DL, StackAdjustment)))
          BuildStackAdjustment(MBB, InsertPos, DL, StackAdjustment,
                               /*InEpilogue=*/false);
      }
    }
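    // For illustration (hypothetical frame): with DWARF CFI and no frame
    // pointer, a 16-byte setup adjustment "subl $16, %esp" is paired below
    // with ".cfi_adjust_cfa_offset 16", and the matching destroy adjustment
    // with ".cfi_adjust_cfa_offset -16", so the CFA stays correct at each
    // call site.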
    if (DwarfCFI && !hasFP(MF)) {
      // If we don't have an FP, but need to generate unwind information,
      // we need to set the correct CFA offset after the stack adjustment.
      // How much we adjust the CFA offset depends on whether we're emitting
      // CFI only for EH purposes or for debugging. EH only requires the CFA
      // offset to be correct at each call site, while for debugging we want
      // it to be more precise.

      int64_t CfaAdjustment = -StackAdjustment;
      // TODO: When not using precise CFA, we also need to adjust for the
      // InternalAmt here.
      if (CfaAdjustment) {
        BuildCFI(MBB, InsertPos, DL,
                 MCCFIInstruction::createAdjustCfaOffset(nullptr,
                                                         CfaAdjustment));
      }
    }

    return I;
  }

  if (InternalAmt) {
    MachineBasicBlock::iterator CI = I;
    MachineBasicBlock::iterator B = MBB.begin();
    while (CI != B && !std::prev(CI)->isCall())
      --CI;
    BuildStackAdjustment(MBB, CI, DL, -InternalAmt, /*InEpilogue=*/false);
  }

  return I;
}

bool X86FrameLowering::canUseAsPrologue(const MachineBasicBlock &MBB) const {
  assert(MBB.getParent() && "Block is not attached to a function!");
  const MachineFunction &MF = *MBB.getParent();
  if (!MBB.isLiveIn(X86::EFLAGS))
    return true;

  const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  return !TRI->hasStackRealignment(MF) && !X86FI->hasSwiftAsyncContext();
}

bool X86FrameLowering::canUseAsEpilogue(const MachineBasicBlock &MBB) const {
  assert(MBB.getParent() && "Block is not attached to a function!");

  // Win64 has strict requirements in terms of epilogues, and we are not
  // taking any chances at messing with them.
  // I.e., unless this block is already an exit block, we can't use
  // it as an epilogue.
  if (STI.isTargetWin64() && !MBB.succ_empty() && !MBB.isReturnBlock())
    return false;

  // The Swift async context epilogue has a BTR instruction that clobbers
  // parts of EFLAGS.
  const MachineFunction &MF = *MBB.getParent();
  if (MF.getInfo<X86MachineFunctionInfo>()->hasSwiftAsyncContext())
    return !flagsNeedToBePreservedBeforeTheTerminators(MBB);

  if (canUseLEAForSPInEpilogue(*MBB.getParent()))
    return true;

  // If we cannot use LEA to adjust SP, we may need to use ADD, which
  // clobbers EFLAGS. Check that we do not need to preserve them;
  // otherwise, conservatively assume it is not safe to insert the
  // epilogue here.
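  // For illustration: "addl $16, %esp" sets EFLAGS according to its result,
  // while "leal 16(%esp), %esp" leaves EFLAGS untouched, which is why an
  // LEA-based epilogue is safe regardless of flag liveness.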
  return !flagsNeedToBePreservedBeforeTheTerminators(MBB);
}

bool X86FrameLowering::enableShrinkWrapping(const MachineFunction &MF) const {
  // If we may need to emit frameless compact unwind information, give
  // up as this is currently broken: PR25614.
  bool CompactUnwind =
      MF.getMMI().getContext().getObjectFileInfo()->getCompactUnwindSection() !=
      nullptr;
  return (MF.getFunction().hasFnAttribute(Attribute::NoUnwind) || hasFP(MF) ||
          !CompactUnwind) &&
         // The lowering of segmented stacks and HiPE only supports entry
         // blocks as prologue blocks: PR26107. This limitation may be
         // lifted if we fix:
         // - adjustForSegmentedStacks
         // - adjustForHiPEPrologue
         MF.getFunction().getCallingConv() != CallingConv::HiPE &&
         !MF.shouldSplitStack();
}

MachineBasicBlock::iterator X86FrameLowering::restoreWin32EHStackPointers(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    const DebugLoc &DL, bool RestoreSP) const {
  assert(STI.isTargetWindowsMSVC() && "funclets only supported in MSVC env");
  assert(STI.isTargetWin32() && "EBP/ESI restoration only required on win32");
  assert(STI.is32Bit() && !Uses64BitFramePtr &&
         "restoring EBP/ESI on non-32-bit target");

  MachineFunction &MF = *MBB.getParent();
  Register FramePtr = TRI->getFrameRegister(MF);
  Register BasePtr = TRI->getBaseRegister();
  WinEHFuncInfo &FuncInfo = *MF.getWinEHFuncInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // FIXME: Don't set the FrameSetup flag in the catchret case.

  int FI = FuncInfo.EHRegNodeFrameIndex;
  int EHRegSize = MFI.getObjectSize(FI);

  if (RestoreSP) {
    // MOV32rm -EHRegSize(%ebp), %esp
    addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32rm), X86::ESP),
                 X86::EBP, true, -EHRegSize)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  Register UsedReg;
  int EHRegOffset = getFrameIndexReference(MF, FI, UsedReg).getFixed();
  int EndOffset = -EHRegOffset - EHRegSize;
  FuncInfo.EHRegNodeEndOffset = EndOffset;

  if (UsedReg == FramePtr) {
    // ADD $offset, %ebp
    unsigned ADDri = getADDriOpcode(false, EndOffset);
    BuildMI(MBB, MBBI, DL, TII.get(ADDri), FramePtr)
        .addReg(FramePtr)
        .addImm(EndOffset)
        .setMIFlag(MachineInstr::FrameSetup)
        ->getOperand(3)
        .setIsDead();
    assert(EndOffset >= 0 &&
           "end of registration object above normal EBP position!");
  } else if (UsedReg == BasePtr) {
    // LEA offset(%ebp), %esi
    addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::LEA32r), BasePtr),
                 FramePtr, false, EndOffset)
        .setMIFlag(MachineInstr::FrameSetup);
    // MOV32rm SavedEBPOffset(%esi), %ebp
    assert(X86FI->getHasSEHFramePtrSave());
    int Offset =
        getFrameIndexReference(MF, X86FI->getSEHFramePtrSaveIndex(), UsedReg)
            .getFixed();
    assert(UsedReg == BasePtr);
    addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32rm), FramePtr),
                 UsedReg, true, Offset)
        .setMIFlag(MachineInstr::FrameSetup);
  } else {
    llvm_unreachable("32-bit frames with WinEH must use FramePtr or BasePtr");
  }
  return MBBI;
}

int X86FrameLowering::getInitialCFAOffset(const MachineFunction &MF) const {
  return TRI->getSlotSize();
}

Register
X86FrameLowering::getInitialCFARegister(const MachineFunction &MF) const {
  return TRI->getDwarfRegNum(StackPtr, true);
}
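// For illustration of the two hooks above: at the entry point of an x86-64
// function the CFA is %rsp + 8 (the byte just above the pushed return
// address); on 32-bit x86 it is %esp + 4.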
namespace {
// Struct used by orderFrameObjects to help sort the stack objects.
struct X86FrameSortingObject {
  bool IsValid = false;             // true if we care about this Object.
  unsigned ObjectIndex = 0;         // Index of Object into MFI list.
  unsigned ObjectSize = 0;          // Size of Object in bytes.
  Align ObjectAlignment = Align(1); // Alignment of Object in bytes.
  unsigned ObjectNumUses = 0;       // Object static number of uses.
};

// The comparison function we use for llvm::stable_sort to order our local
// stack symbols. The current algorithm is to use an estimated
// "density". This takes into consideration the size and number of
// uses each object has in order to roughly minimize code size.
// So, for example, an object of size 16B that is referenced 5 times
// will get higher priority than 4 4B objects referenced 1 time each.
// It's not perfect and we may be able to squeeze a few more bytes out of
// it (for example: 0(esp) requires fewer bytes, symbols allocated at the
// fringe end can have special consideration, given that their size is less
// important, etc.), but the algorithmic complexity grows too much to be
// worth the extra gains we get. This gets us pretty close.
// The final order leaves us with objects with highest priority placed
// at the end of our list.
struct X86FrameSortingComparator {
  inline bool operator()(const X86FrameSortingObject &A,
                         const X86FrameSortingObject &B) const {
    uint64_t DensityAScaled, DensityBScaled;

    // For consistency in our comparison, all invalid objects are placed
    // at the end. This also allows us to stop walking when we hit the
    // first invalid item after it's all sorted.
    if (!A.IsValid)
      return false;
    if (!B.IsValid)
      return true;

    // The density is calculated by doing:
    //   (double)DensityA = A.ObjectNumUses / A.ObjectSize
    //   (double)DensityB = B.ObjectNumUses / B.ObjectSize
    // Since this approach may cause inconsistencies in
    // the floating point <, >, == comparisons, depending on the floating
    // point model with which the compiler was built, we're going
    // to scale both sides by multiplying with
    // A.ObjectSize * B.ObjectSize. This ends up factoring away
    // the division and, with it, the need for any floating point
    // arithmetic.
    DensityAScaled = static_cast<uint64_t>(A.ObjectNumUses) *
                     static_cast<uint64_t>(B.ObjectSize);
    DensityBScaled = static_cast<uint64_t>(B.ObjectNumUses) *
                     static_cast<uint64_t>(A.ObjectSize);

    // If the two densities are equal, prioritize highest alignment
    // objects. This allows for similar alignment objects
    // to be packed together (given the same density).
    // There's room for improvement here, also, since we can pack
    // similar alignment (different density) objects next to each
    // other to save padding. This will also require further
    // complexity/iterations, and the overall gain isn't worth it,
    // in general. Something to keep in mind, though.
    if (DensityAScaled == DensityBScaled)
      return A.ObjectAlignment < B.ObjectAlignment;

    return DensityAScaled < DensityBScaled;
  }
};
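// For illustration, plugging in the example from the comment above: a
// 16-byte object A with 5 uses versus a 4-byte object B with 1 use gives
//   DensityAScaled = 5 * 4  = 20
//   DensityBScaled = 1 * 16 = 16
// so A sorts after B in the ascending order, i.e. closer to the end of the
// list, which is the higher-priority position.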
} // namespace

// Order the symbols in the local stack.
// We want to place the local stack objects in some sort of sensible order.
// The heuristic we use is to try and pack them according to static number
// of uses and size of object in order to minimize code size.
void X86FrameLowering::orderFrameObjects(
    const MachineFunction &MF, SmallVectorImpl<int> &ObjectsToAllocate) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // Don't waste time if there's nothing to do.
  if (ObjectsToAllocate.empty())
    return;

  // Create an array of all MFI objects. We won't need all of these
  // objects, but we're going to create a full array of them to make
  // it easier to index into when we're counting "uses" down below.
  // We want to be able to easily/cheaply access an object by simply
  // indexing into it, instead of having to search for it every time.
  std::vector<X86FrameSortingObject> SortingObjects(MFI.getObjectIndexEnd());

  // Walk the objects we care about and mark them as such in our working
  // struct.
  for (auto &Obj : ObjectsToAllocate) {
    SortingObjects[Obj].IsValid = true;
    SortingObjects[Obj].ObjectIndex = Obj;
    SortingObjects[Obj].ObjectAlignment = MFI.getObjectAlign(Obj);
    // Set the size.
    int ObjectSize = MFI.getObjectSize(Obj);
    if (ObjectSize == 0)
      // Variable size. Just use 4.
      SortingObjects[Obj].ObjectSize = 4;
    else
      SortingObjects[Obj].ObjectSize = ObjectSize;
  }

  // Count the number of uses for each object.
  for (auto &MBB : MF) {
    for (auto &MI : MBB) {
      if (MI.isDebugInstr())
        continue;
      for (const MachineOperand &MO : MI.operands()) {
        // Check to see if it's a local stack symbol.
        if (!MO.isFI())
          continue;
        int Index = MO.getIndex();
        // Check to see if it falls within our range, and is tagged
        // to require ordering.
        if (Index >= 0 && Index < MFI.getObjectIndexEnd() &&
            SortingObjects[Index].IsValid)
          SortingObjects[Index].ObjectNumUses++;
      }
    }
  }

  // Sort the objects using X86FrameSortingComparator (see its comment for
  // more info).
  llvm::stable_sort(SortingObjects, X86FrameSortingComparator());

  // Now modify the original list to represent the final order that
  // we want. The order will depend on whether we're going to access them
  // from the stack pointer or the frame pointer. For SP-relative access, the
  // list should end with the objects we want at the smallest offsets.
  // For FP-relative access, it should be flipped.
  int i = 0;
  for (auto &Obj : SortingObjects) {
    // All invalid items are sorted at the end, so it's safe to stop.
    if (!Obj.IsValid)
      break;
    ObjectsToAllocate[i++] = Obj.ObjectIndex;
  }

  // Flip it if we're accessing off of the FP.
  if (!TRI->hasStackRealignment(MF) && hasFP(MF))
    std::reverse(ObjectsToAllocate.begin(), ObjectsToAllocate.end());
}
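// For illustration (hypothetical sizes): with the 16 bytes for the homed RDX,
// the 8-byte pushed RBP, 24 bytes of other callee-saved registers, and a
// 32-byte funclet frame, getWinEHParentFrameOffset below returns
//   16 + 8 + 24 + 32 = 80.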
unsigned
X86FrameLowering::getWinEHParentFrameOffset(const MachineFunction &MF) const {
  // RDX, the parent frame pointer, is homed into 16(%rsp) in the prologue.
  unsigned Offset = 16;
  // RBP is immediately pushed.
  Offset += SlotSize;
  // All callee-saved registers are then pushed.
  Offset += MF.getInfo<X86MachineFunctionInfo>()->getCalleeSavedFrameSize();
  // Every funclet allocates enough stack space for the largest outgoing call.
  Offset += getWinEHFuncletFrameSize(MF);
  return Offset;
}

void X86FrameLowering::processFunctionBeforeFrameFinalized(
    MachineFunction &MF, RegScavenger *RS) const {
  // Mark the function as not having WinCFI. We will set it back to true in
  // emitPrologue if it gets called and emits CFI.
  MF.setHasWinCFI(false);

  // If we are using Windows x64 CFI, ensure that the stack is always 8-byte
  // aligned. The format doesn't support misaligned stack adjustments.
  if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI())
    MF.getFrameInfo().ensureMaxAlignment(Align(SlotSize));

  // If this function isn't doing Win64-style C++ EH, we don't need to do
  // anything.
  if (STI.is64Bit() && MF.hasEHFunclets() &&
      classifyEHPersonality(MF.getFunction().getPersonalityFn()) ==
          EHPersonality::MSVC_CXX) {
    adjustFrameForMsvcCxxEh(MF);
  }
}

void X86FrameLowering::adjustFrameForMsvcCxxEh(MachineFunction &MF) const {
  // Win64 C++ EH needs to allocate the UnwindHelp object at some fixed offset
  // relative to RSP after the prologue. Find the offset of the last fixed
  // object, so that we can allocate a slot immediately following it. If there
  // were no fixed objects, use offset -SlotSize, which is immediately after
  // the return address. Fixed objects have negative frame indices.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  WinEHFuncInfo &EHInfo = *MF.getWinEHFuncInfo();
  int64_t MinFixedObjOffset = -SlotSize;
  for (int I = MFI.getObjectIndexBegin(); I < 0; ++I)
    MinFixedObjOffset = std::min(MinFixedObjOffset, MFI.getObjectOffset(I));

  for (WinEHTryBlockMapEntry &TBME : EHInfo.TryBlockMap) {
    for (WinEHHandlerType &H : TBME.HandlerArray) {
      int FrameIndex = H.CatchObj.FrameIndex;
      if (FrameIndex != INT_MAX) {
        // Ensure alignment.
        unsigned Align = MFI.getObjectAlign(FrameIndex).value();
        MinFixedObjOffset -= std::abs(MinFixedObjOffset) % Align;
        MinFixedObjOffset -= MFI.getObjectSize(FrameIndex);
        MFI.setObjectOffset(FrameIndex, MinFixedObjOffset);
      }
    }
  }

  // Ensure alignment.
  MinFixedObjOffset -= std::abs(MinFixedObjOffset) % 8;
  int64_t UnwindHelpOffset = MinFixedObjOffset - SlotSize;
  int UnwindHelpFI =
      MFI.CreateFixedObject(SlotSize, UnwindHelpOffset, /*IsImmutable=*/false);
  EHInfo.UnwindHelpFrameIdx = UnwindHelpFI;

  // Store -2 into UnwindHelp on function entry. We have to scan forwards past
  // other frame setup instructions.
  MachineBasicBlock &MBB = MF.front();
  auto MBBI = MBB.begin();
  while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup))
    ++MBBI;

  DebugLoc DL = MBB.findDebugLoc(MBBI);
  addFrameReference(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mi32)),
                    UnwindHelpFI)
      .addImm(-2);
}

void X86FrameLowering::processFunctionBeforeFrameIndicesReplaced(
    MachineFunction &MF, RegScavenger *RS) const {
  if (STI.is32Bit() && MF.hasEHFunclets())
    restoreWinEHStackPointersInParent(MF);
}

void X86FrameLowering::restoreWinEHStackPointersInParent(
    MachineFunction &MF) const {
  // 32-bit functions have to restore stack pointers when control is
  // transferred back to the parent function. These blocks are identified as
  // EH pads that are not funclet entries.
  bool IsSEH = isAsynchronousEHPersonality(
      classifyEHPersonality(MF.getFunction().getPersonalityFn()));
  for (MachineBasicBlock &MBB : MF) {
    bool NeedsRestore = MBB.isEHPad() && !MBB.isEHFuncletEntry();
    if (NeedsRestore)
      restoreWin32EHStackPointers(MBB, MBB.begin(), DebugLoc(),
                                  /*RestoreSP=*/IsSEH);
  }
}